-rw-r--r--  deps/v8/AUTHORS  15
-rw-r--r--  deps/v8/ChangeLog  89
-rw-r--r--  deps/v8/SConstruct  6
-rw-r--r--  deps/v8/benchmarks/README.txt  18
-rw-r--r--  deps/v8/benchmarks/base.js  51
-rw-r--r--  deps/v8/benchmarks/deltablue.js  8
-rw-r--r--  deps/v8/benchmarks/earley-boyer.js  1
-rw-r--r--  deps/v8/benchmarks/raytrace.js  2521
-rw-r--r--  deps/v8/benchmarks/revisions.html  23
-rw-r--r--  deps/v8/benchmarks/richards.js  2
-rw-r--r--  deps/v8/benchmarks/run.html  12
-rw-r--r--  deps/v8/benchmarks/run.js  1
-rw-r--r--  deps/v8/benchmarks/splay.js  378
-rw-r--r--  deps/v8/include/v8-debug.h  3
-rw-r--r--  deps/v8/include/v8.h  29
-rwxr-xr-x  deps/v8/src/SConscript  16
-rw-r--r--  deps/v8/src/accessors.cc  74
-rw-r--r--  deps/v8/src/accessors.h  41
-rw-r--r--  deps/v8/src/allocation.cc  3
-rw-r--r--  deps/v8/src/allocation.h  3
-rw-r--r--  deps/v8/src/api.cc  84
-rw-r--r--  deps/v8/src/arguments.h  3
-rw-r--r--  deps/v8/src/arm/assembler-arm-inl.h  3
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc  7
-rw-r--r--  deps/v8/src/arm/assembler-arm.h  9
-rw-r--r--  deps/v8/src/arm/builtins-arm.cc  5
-rw-r--r--  deps/v8/src/arm/codegen-arm-inl.h  46
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc  586
-rw-r--r--  deps/v8/src/arm/codegen-arm.h  11
-rw-r--r--  deps/v8/src/arm/constants-arm.h  3
-rw-r--r--  deps/v8/src/arm/cpu-arm.cc  3
-rw-r--r--  deps/v8/src/arm/debug-arm.cc  3
-rw-r--r--  deps/v8/src/arm/disasm-arm.cc  3
-rw-r--r--  deps/v8/src/arm/frames-arm.cc  3
-rw-r--r--  deps/v8/src/arm/frames-arm.h  3
-rw-r--r--  deps/v8/src/arm/ic-arm.cc  13
-rw-r--r--  deps/v8/src/arm/jump-target-arm.cc  147
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc  28
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h  3
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.cc  3
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.h  3
-rw-r--r--  deps/v8/src/arm/register-allocator-arm-inl.h  103
-rw-r--r--  deps/v8/src/arm/register-allocator-arm.cc  55
-rw-r--r--  deps/v8/src/arm/register-allocator-arm.h  43
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc  3
-rw-r--r--  deps/v8/src/arm/simulator-arm.h  3
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc  44
-rw-r--r--  deps/v8/src/arm/virtual-frame-arm.cc  77
-rw-r--r--  deps/v8/src/arm/virtual-frame-arm.h  198
-rw-r--r--  deps/v8/src/assembler.cc  33
-rw-r--r--  deps/v8/src/assembler.h  11
-rw-r--r--  deps/v8/src/ast.cc  3
-rw-r--r--  deps/v8/src/ast.h  3
-rw-r--r--  deps/v8/src/bootstrapper.cc  80
-rw-r--r--  deps/v8/src/bootstrapper.h  3
-rw-r--r--  deps/v8/src/builtins.cc  46
-rw-r--r--  deps/v8/src/builtins.h  65
-rw-r--r--  deps/v8/src/bytecodes-irregexp.h  3
-rw-r--r--  deps/v8/src/char-predicates-inl.h  3
-rw-r--r--  deps/v8/src/char-predicates.h  3
-rw-r--r--  deps/v8/src/code-stubs.cc  5
-rw-r--r--  deps/v8/src/code-stubs.h  7
-rw-r--r--  deps/v8/src/code.h  3
-rw-r--r--  deps/v8/src/codegen-inl.h  27
-rw-r--r--  deps/v8/src/codegen.cc  98
-rw-r--r--  deps/v8/src/codegen.h  89
-rw-r--r--  deps/v8/src/compilation-cache.cc  159
-rw-r--r--  deps/v8/src/compilation-cache.h  18
-rw-r--r--  deps/v8/src/compiler.cc  49
-rw-r--r--  deps/v8/src/compiler.h  22
-rw-r--r--  deps/v8/src/contexts.cc  3
-rw-r--r--  deps/v8/src/contexts.h  6
-rw-r--r--  deps/v8/src/conversions-inl.h  3
-rw-r--r--  deps/v8/src/conversions.cc  3
-rw-r--r--  deps/v8/src/conversions.h  3
-rw-r--r--  deps/v8/src/counters.cc  3
-rw-r--r--  deps/v8/src/counters.h  5
-rw-r--r--  deps/v8/src/cpu.h  3
-rw-r--r--  deps/v8/src/d8-posix.cc  5
-rw-r--r--  deps/v8/src/d8.cc  12
-rw-r--r--  deps/v8/src/d8.js  56
-rw-r--r--  deps/v8/src/dateparser-inl.h  3
-rw-r--r--  deps/v8/src/dateparser.cc  3
-rw-r--r--  deps/v8/src/dateparser.h  3
-rw-r--r--  deps/v8/src/debug-agent.cc  3
-rw-r--r--  deps/v8/src/debug-agent.h  3
-rw-r--r--  deps/v8/src/debug-delay.js  267
-rw-r--r--  deps/v8/src/debug.cc  319
-rw-r--r--  deps/v8/src/debug.h  136
-rw-r--r--  deps/v8/src/disassembler.cc  12
-rw-r--r--  deps/v8/src/disassembler.h  3
-rw-r--r--  deps/v8/src/execution.cc  44
-rw-r--r--  deps/v8/src/execution.h  7
-rw-r--r--  deps/v8/src/factory.cc  14
-rw-r--r--  deps/v8/src/factory.h  10
-rw-r--r--  deps/v8/src/flag-definitions.h  6
-rw-r--r--  deps/v8/src/flags.cc  7
-rw-r--r--  deps/v8/src/flags.h  3
-rw-r--r--  deps/v8/src/frame-element.h  265
-rw-r--r--  deps/v8/src/frames-inl.h  3
-rw-r--r--  deps/v8/src/frames.cc  3
-rw-r--r--  deps/v8/src/frames.h  6
-rw-r--r--  deps/v8/src/func-name-inferrer.cc  3
-rw-r--r--  deps/v8/src/func-name-inferrer.h  41
-rw-r--r--  deps/v8/src/global-handles.cc  3
-rw-r--r--  deps/v8/src/global-handles.h  3
-rw-r--r--  deps/v8/src/globals.h  51
-rw-r--r--  deps/v8/src/handles-inl.h  3
-rw-r--r--  deps/v8/src/handles.cc  46
-rw-r--r--  deps/v8/src/handles.h  15
-rw-r--r--  deps/v8/src/hashmap.cc  73
-rw-r--r--  deps/v8/src/hashmap.h  6
-rw-r--r--  deps/v8/src/heap-inl.h  26
-rw-r--r--  deps/v8/src/heap.cc  195
-rw-r--r--  deps/v8/src/heap.h  16
-rw-r--r--  deps/v8/src/ia32/assembler-ia32-inl.h  21
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.cc  42
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.h  41
-rw-r--r--  deps/v8/src/ia32/builtins-ia32.cc  5
-rw-r--r--  deps/v8/src/ia32/codegen-ia32-inl.h  46
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc  1938
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.h  16
-rw-r--r--  deps/v8/src/ia32/cpu-ia32.cc  5
-rw-r--r--  deps/v8/src/ia32/debug-ia32.cc  3
-rw-r--r--  deps/v8/src/ia32/frames-ia32.cc  3
-rw-r--r--  deps/v8/src/ia32/frames-ia32.h  3
-rw-r--r--  deps/v8/src/ia32/ic-ia32.cc  13
-rw-r--r--  deps/v8/src/ia32/jump-target-ia32.cc  239
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc  9
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.h  3
-rw-r--r--  deps/v8/src/ia32/regexp-macro-assembler-ia32.cc  8
-rw-r--r--  deps/v8/src/ia32/regexp-macro-assembler-ia32.h  3
-rw-r--r--  deps/v8/src/ia32/register-allocator-ia32-inl.h  82
-rw-r--r--  deps/v8/src/ia32/register-allocator-ia32.cc  71
-rw-r--r--  deps/v8/src/ia32/register-allocator-ia32.h  43
-rw-r--r--  deps/v8/src/ia32/stub-cache-ia32.cc  51
-rw-r--r--  deps/v8/src/ia32/virtual-frame-ia32.cc  320
-rw-r--r--  deps/v8/src/ia32/virtual-frame-ia32.h  203
-rw-r--r--  deps/v8/src/ic-inl.h  3
-rw-r--r--  deps/v8/src/ic.cc  59
-rw-r--r--  deps/v8/src/ic.h  8
-rw-r--r--  deps/v8/src/interpreter-irregexp.cc  3
-rw-r--r--  deps/v8/src/interpreter-irregexp.h  3
-rw-r--r--  deps/v8/src/json-delay.js  2
-rw-r--r--  deps/v8/src/jsregexp-inl.h  3
-rw-r--r--  deps/v8/src/jsregexp.cc  37
-rw-r--r--  deps/v8/src/jsregexp.h  14
-rw-r--r--  deps/v8/src/jump-target-inl.h  49
-rw-r--r--  deps/v8/src/jump-target.cc  494
-rw-r--r--  deps/v8/src/jump-target.h  125
-rw-r--r--  deps/v8/src/list-inl.h  27
-rw-r--r--  deps/v8/src/list.h  9
-rw-r--r--  deps/v8/src/log-utils.cc  302
-rw-r--r--  deps/v8/src/log-utils.h  223
-rw-r--r--  deps/v8/src/log.cc  542
-rw-r--r--  deps/v8/src/log.h  33
-rw-r--r--  deps/v8/src/mark-compact.cc  7
-rw-r--r--  deps/v8/src/mark-compact.h  3
-rw-r--r--  deps/v8/src/memory.h  11
-rw-r--r--  deps/v8/src/messages.cc  3
-rw-r--r--  deps/v8/src/messages.h  6
-rw-r--r--  deps/v8/src/mirror-delay.js  409
-rw-r--r--  deps/v8/src/natives.h  3
-rw-r--r--  deps/v8/src/objects-debug.cc  5
-rw-r--r--  deps/v8/src/objects-inl.h  48
-rw-r--r--  deps/v8/src/objects.cc  223
-rw-r--r--  deps/v8/src/objects.h  217
-rw-r--r--  deps/v8/src/oprofile-agent.cc  3
-rw-r--r--  deps/v8/src/oprofile-agent.h  3
-rw-r--r--  deps/v8/src/parser.cc  143
-rw-r--r--  deps/v8/src/parser.h  3
-rw-r--r--  deps/v8/src/platform-freebsd.cc  3
-rw-r--r--  deps/v8/src/platform-linux.cc  19
-rw-r--r--  deps/v8/src/platform-macos.cc  13
-rw-r--r--  deps/v8/src/platform-nullos.cc  3
-rw-r--r--  deps/v8/src/platform-posix.cc  3
-rw-r--r--  deps/v8/src/platform-win32.cc  9
-rw-r--r--  deps/v8/src/platform.h  13
-rw-r--r--  deps/v8/src/prettyprinter.cc  10
-rw-r--r--  deps/v8/src/prettyprinter.h  3
-rw-r--r--  deps/v8/src/property.cc  3
-rw-r--r--  deps/v8/src/property.h  3
-rw-r--r--  deps/v8/src/regexp-macro-assembler-irregexp-inl.h  3
-rw-r--r--  deps/v8/src/regexp-macro-assembler-irregexp.cc  3
-rw-r--r--  deps/v8/src/regexp-macro-assembler-irregexp.h  3
-rw-r--r--  deps/v8/src/regexp-macro-assembler-tracer.cc  3
-rw-r--r--  deps/v8/src/regexp-macro-assembler-tracer.h  3
-rw-r--r--  deps/v8/src/regexp-macro-assembler.cc  3
-rw-r--r--  deps/v8/src/regexp-macro-assembler.h  3
-rw-r--r--  deps/v8/src/regexp-stack.cc  3
-rw-r--r--  deps/v8/src/regexp-stack.h  3
-rw-r--r--  deps/v8/src/register-allocator-inl.h  33
-rw-r--r--  deps/v8/src/register-allocator.cc  71
-rw-r--r--  deps/v8/src/register-allocator.h  210
-rw-r--r--  deps/v8/src/rewriter.cc  5
-rw-r--r--  deps/v8/src/rewriter.h  3
-rw-r--r--  deps/v8/src/runtime.cc  193
-rw-r--r--  deps/v8/src/runtime.h  22
-rw-r--r--  deps/v8/src/runtime.js  12
-rw-r--r--  deps/v8/src/scanner.cc  461
-rw-r--r--  deps/v8/src/scanner.h  35
-rw-r--r--  deps/v8/src/scopeinfo.cc  4
-rw-r--r--  deps/v8/src/scopeinfo.h  15
-rw-r--r--  deps/v8/src/scopes.cc  63
-rw-r--r--  deps/v8/src/scopes.h  23
-rw-r--r--  deps/v8/src/serialize.cc  3
-rw-r--r--  deps/v8/src/serialize.h  3
-rw-r--r--  deps/v8/src/shell.h  3
-rw-r--r--  deps/v8/src/smart-pointer.h  3
-rw-r--r--  deps/v8/src/snapshot-common.cc  3
-rw-r--r--  deps/v8/src/snapshot-empty.cc  3
-rw-r--r--  deps/v8/src/snapshot.h  3
-rw-r--r--  deps/v8/src/spaces-inl.h  14
-rw-r--r--  deps/v8/src/spaces.cc  46
-rw-r--r--  deps/v8/src/spaces.h  8
-rw-r--r--  deps/v8/src/string-stream.cc  3
-rw-r--r--  deps/v8/src/string-stream.h  3
-rw-r--r--  deps/v8/src/string.js  28
-rw-r--r--  deps/v8/src/stub-cache.cc  81
-rw-r--r--  deps/v8/src/stub-cache.h  40
-rw-r--r--  deps/v8/src/token.cc  3
-rw-r--r--  deps/v8/src/token.h  5
-rw-r--r--  deps/v8/src/top.cc  16
-rw-r--r--  deps/v8/src/top.h  30
-rw-r--r--  deps/v8/src/usage-analyzer.cc  4
-rw-r--r--  deps/v8/src/usage-analyzer.h  3
-rw-r--r--  deps/v8/src/utils.cc  17
-rw-r--r--  deps/v8/src/utils.h  34
-rw-r--r--  deps/v8/src/v8-counters.cc  3
-rw-r--r--  deps/v8/src/v8-counters.h  37
-rw-r--r--  deps/v8/src/v8.cc  27
-rw-r--r--  deps/v8/src/v8.h  17
-rw-r--r--  deps/v8/src/v8natives.js  16
-rw-r--r--  deps/v8/src/v8threads.cc  2
-rw-r--r--  deps/v8/src/v8threads.h  3
-rw-r--r--  deps/v8/src/variables.cc  3
-rw-r--r--  deps/v8/src/variables.h  3
-rw-r--r--  deps/v8/src/version.cc  5
-rw-r--r--  deps/v8/src/version.h  3
-rw-r--r--  deps/v8/src/virtual-frame.cc  235
-rw-r--r--  deps/v8/src/virtual-frame.h  173
-rw-r--r--  deps/v8/src/x64/assembler-x64-inl.h  250
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc  1513
-rw-r--r--  deps/v8/src/x64/assembler-x64.h  688
-rw-r--r--  deps/v8/src/x64/builtins-x64.cc  38
-rw-r--r--  deps/v8/src/x64/codegen-x64-inl.h  42
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc  309
-rw-r--r--  deps/v8/src/x64/codegen-x64.h  25
-rw-r--r--  deps/v8/src/x64/cpu-x64.cc  39
-rw-r--r--  deps/v8/src/x64/debug-x64.cc  56
-rw-r--r--  deps/v8/src/x64/disasm-x64.cc  61
-rw-r--r--  deps/v8/src/x64/frames-x64.h  17
-rw-r--r--  deps/v8/src/x64/ic-x64.cc  149
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc  89
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h  14
-rw-r--r--  deps/v8/src/x64/register-allocator-x64-inl.h  69
-rw-r--r--  deps/v8/src/x64/register-allocator-x64.h  45
-rw-r--r--  deps/v8/src/x64/virtual-frame-x64.h  203
-rw-r--r--  deps/v8/src/zone-inl.h  3
-rw-r--r--  deps/v8/src/zone.cc  3
-rw-r--r--  deps/v8/src/zone.h  12
-rw-r--r--  deps/v8/test/cctest/SConscript  2
-rw-r--r--  deps/v8/test/cctest/cctest.status  9
-rw-r--r--  deps/v8/test/cctest/test-api.cc  386
-rw-r--r--  deps/v8/test/cctest/test-assembler-x64.cc  251
-rw-r--r--  deps/v8/test/cctest/test-debug.cc  541
-rw-r--r--  deps/v8/test/cctest/test-func-name-inference.cc  17
-rw-r--r--  deps/v8/test/cctest/test-hashmap.cc  73
-rw-r--r--  deps/v8/test/cctest/test-heap.cc  39
-rw-r--r--  deps/v8/test/cctest/test-list.cc  34
-rw-r--r--  deps/v8/test/cctest/test-log-ia32.cc  77
-rw-r--r--  deps/v8/test/cctest/test-log-utils.cc  132
-rw-r--r--  deps/v8/test/cctest/test-log.cc  621
-rw-r--r--  deps/v8/test/cctest/test-utils.cc  7
-rw-r--r--  deps/v8/test/cctest/test-version.cc  3
-rw-r--r--  deps/v8/test/mjsunit/codegen-coverage.js (renamed from deps/v8/test/mjsunit/codegen_coverage.js)  0
-rw-r--r--  deps/v8/test/mjsunit/debug-backtrace.js  24
-rw-r--r--  deps/v8/test/mjsunit/debug-compile-event.js  39
-rw-r--r--  deps/v8/test/mjsunit/debug-references.js  2
-rw-r--r--  deps/v8/test/mjsunit/debug-scripts-request.js  21
-rw-r--r--  deps/v8/test/mjsunit/mirror-array.js  5
-rw-r--r--  deps/v8/test/mjsunit/mirror-boolean.js  2
-rw-r--r--  deps/v8/test/mjsunit/mirror-date.js  18
-rw-r--r--  deps/v8/test/mjsunit/mirror-error.js  5
-rw-r--r--  deps/v8/test/mjsunit/mirror-function.js  5
-rw-r--r--  deps/v8/test/mjsunit/mirror-null.js  2
-rw-r--r--  deps/v8/test/mjsunit/mirror-number.js  2
-rw-r--r--  deps/v8/test/mjsunit/mirror-object.js  8
-rw-r--r--  deps/v8/test/mjsunit/mirror-regexp.js  5
-rw-r--r--  deps/v8/test/mjsunit/mirror-script.js  32
-rw-r--r--  deps/v8/test/mjsunit/mirror-string.js  2
-rw-r--r--  deps/v8/test/mjsunit/mirror-undefined.js  2
-rw-r--r--  deps/v8/test/mjsunit/mirror-unresolved-function.js  5
-rw-r--r--  deps/v8/test/mjsunit/regexp.js  13
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-334.js (renamed from deps/v8/test/mjsunit/bugs/bug-334.js)  0
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-341.js  36
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-349.js  32
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-351.js  31
-rw-r--r--  deps/v8/test/mjsunit/string-lastindexof.js  63
-rw-r--r--  deps/v8/tools/gyp/v8.gyp  19
-rw-r--r--  deps/v8/tools/linux-tick-processor  23
-rw-r--r--  deps/v8/tools/profile_view.js  191
-rwxr-xr-x  deps/v8/tools/run-valgrind.py  2
-rwxr-xr-x  deps/v8/tools/test.py  10
-rw-r--r--  deps/v8/tools/tickprocessor.js  4
-rwxr-xr-x  deps/v8/tools/v8.xcodeproj/project.pbxproj  8
-rw-r--r--  deps/v8/tools/visual_studio/v8_base.vcproj  16
-rw-r--r--  deps/v8/tools/visual_studio/v8_base_arm.vcproj  8
-rw-r--r--  deps/v8/tools/visual_studio/v8_cctest.vcproj  4
-rw-r--r--  deps/v8/tools/visual_studio/v8_cctest_arm.vcproj  4
310 files changed, 13940 insertions, 8075 deletions
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index 060393f64..9b198d077 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -5,11 +5,14 @@
Google Inc.
-Rene Rebe <rene@exactcode.de>
-Rafal Krypa <rafal@krypa.net>
-Jay Freeman <saurik@saurik.com>
-Daniel James <dnljms@gmail.com>
-Paolo Giarrusso <p.giarrusso@gmail.com>
-Daniel Andersson <kodandersson@gmail.com>
Alexander Botero-Lowry <alexbl@FreeBSD.org>
+Craig Schlenter <craig.schlenter@gmail.com>
+Daniel Andersson <kodandersson@gmail.com>
+Daniel James <dnljms@gmail.com>
+Jay Freeman <saurik@saurik.com>
+Joel Stanley <joel.stan@gmail.com>
Matt Hanselman <mjhanselman@gmail.com>
+Paolo Giarrusso <p.giarrusso@gmail.com>
+Rafal Krypa <rafal@krypa.net>
+Rene Rebe <rene@exactcode.de>
+Ryan Dahl <coldredlemur@gmail.com>
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 2ff993659..3df6885a2 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,92 @@
+2009-06-08: Version 1.2.7
+
+ Improved debugger and profiler support.
+
+ Reduced compilation time by improving the handling of deferred
+ code.
+
+ Optimized interceptor accesses where the property is on the object
+ on which the interceptor is attached.
+
+ Fixed compilation problem on GCC 4.4 by changing the stack
+ alignment to 16 bytes.
+
+ Fixed handle creation to follow strict aliasing rules.
+
+ Fixed compilation on FreeBSD.
+
+ Introduced API for forcing the deletion of a property ignoring
+ interceptors and attributes.
+
+
+2009-05-29: Version 1.2.6
+
+ Added a histogram recording hit rates at different levels of the
+ compilation cache.
+
+ Added stack overflow check for the RegExp analysis phase. Previously a
+ very long regexp graph could overflow the stack with recursive calls.
+
+ Use a dynamic buffer when collecting log events in memory.
+
+ Added start/stop events to the profiler log.
+
+ Fixed infinite loop which could happen when setting a debug break while
+ executing a RegExp compiled to native code.
+
+ Fixed handling of lastIndexOf called with negative index (issue 351).
+
+ Fixed irregular crash in profiler test (issue 358).
+
+ Fixed compilation issues with some versions of gcc.
+
+
+2009-05-26: Version 1.2.5
+
+ Fixed bug in initial boundary check for Boyer-Moore text
+ search (issue 349).
+
+ Fixed compilation issues with MinGW and gcc 4.3+ and added support
+ for armv7 and cortex-a8 architectures. Patches by Lei Zhang and
+ Craig Schlenter.
+
+ Added a script cache to the debugger.
+
+ Optimized compilation performance by improving internal data
+ structures and avoiding expensive property load optimizations for
+ code that's infrequently executed.
+
+ Exposed the calling JavaScript context through the static API
+ function Context::GetCalling().
+
+
+2009-05-18: Version 1.2.4
+
+ Improved performance of floating point number allocation for ARM
+ platforms.
+
+ Fixed crash when using the instanceof operator on functions with
+ number values in their prototype chain (issue 341).
+
+ Optimized virtual frame operations in the code generator to speed
+ up compilation time and allocated the frames in the zone.
+
+ Made the representation of virtual frames and jump targets in the
+ code generator much more compact.
+
+ Avoided linear search for non-locals in scope code when resolving
+ variables inside with and eval scopes.
+
+ Optimized lexical scanner by dealing with whitespace as part of
+ the token scanning instead of as a separate step before it.
+
+ Changed the scavenging collector so that promoted objects do not
+ reside in the old generation while their remembered set is being
+ swept for pointers into the young generation.
+
+ Fixed numeric overflow handling when compiling count operations.
+
+
2009-05-11: Version 1.2.3
Fixed bug in reporting of out-of-memory situations.
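
The lastIndexOf fix noted above (issue 351) concerns the spec's clamping rule: a negative position argument must clamp to 0 rather than disable the search. A minimal illustration of the expected behavior; the helper and cases below are written in the mjsunit style but are not the contents of the new test/mjsunit/string-lastindexof.js:

  // Per the ECMAScript spec, the position argument is clamped into
  // [0, length], so a negative index behaves like 0: only a match
  // starting at index 0 can be found.
  function assertEquals(expected, found) {
    if (expected !== found) throw new Error("expected " + expected + ", found " + found);
  }

  var s = "abab";
  assertEquals(2,  s.lastIndexOf("ab"));      // default: search from the end
  assertEquals(0,  s.lastIndexOf("ab", -1));  // -1 clamps to 0
  assertEquals(-1, s.lastIndexOf("b", -1));   // no occurrence starts at 0
  assertEquals(0,  s.lastIndexOf("", -3));    // empty string matches at 0
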
diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct
index a89292629..3b14eea27 100644
--- a/deps/v8/SConstruct
+++ b/deps/v8/SConstruct
@@ -44,10 +44,10 @@ if ANDROID_TOP is None:
ANDROID_TOP=""
# TODO: Sort these issues out properly but as a temporary solution for gcc 4.4
-# on linux we need these compiler flags to avoid a mksnapshot segfault, avoid
-# crashes in the v8 test suite and avoid dtoa.c strict aliasing issues
+# on linux we need these compiler flags to avoid crashes in the v8 test suite
+# and avoid dtoa.c strict aliasing issues
if os.environ.get('GCC_VERSION') == '44':
- GCC_EXTRA_CCFLAGS = ['-fno-tree-vectorize', '-fno-tree-vrp']
+ GCC_EXTRA_CCFLAGS = ['-fno-tree-vrp']
GCC_DTOA_EXTRA_CCFLAGS = ['-fno-strict-aliasing']
else:
GCC_EXTRA_CCFLAGS = []
diff --git a/deps/v8/benchmarks/README.txt b/deps/v8/benchmarks/README.txt
index 46aa57328..561e88b39 100644
--- a/deps/v8/benchmarks/README.txt
+++ b/deps/v8/benchmarks/README.txt
@@ -40,3 +40,21 @@ pages where it occurs and the number of times it is executed while
loading each page. Finally the literal letters in the data are
encoded using ROT13 in a way that does not affect how the regexps
match their input.
+
+
+Changes from Version 3 to Version 4
+===================================
+
+The Splay benchmark is a newcomer in version 4. It manipulates a
+splay tree by adding and removing data nodes, thus exercising the
+memory management subsystem of the JavaScript engine.
+
+Furthermore, all the unused parts of the Prototype library were
+removed from the RayTrace benchmark. This does not affect the running
+of the benchmark.
+
+
+Changes from Version 4 to Version 5
+===================================
+
+Removed duplicate line in random seed code.
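
To give a feel for what the new Splay benchmark stresses, here is a hypothetical sketch of the workload shape only: the shipped splay.js implements a real splay tree, for which a plain object stands in here so the allocation and removal churn, the part that exercises the memory subsystem, stays visible in a few lines:

  var tree = {};    // stand-in for the benchmark's splay tree
  var keys = [];    // insertion order, oldest first

  function insertNewNode() {
    var key = Math.random();
    // Every node carries a freshly allocated payload.
    tree[key] = { array: [0, 1, 2, 3, 4], string: 'String for key ' + key };
    keys.push(key);
  }

  // Build a working set, then keep replacing the oldest node with a
  // new one so garbage accumulates for the collector to reclaim.
  for (var i = 0; i < 1000; i++) insertNewNode();
  for (var j = 0; j < 5000; j++) {
    delete tree[keys.shift()];
    insertNewNode();
  }
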
diff --git a/deps/v8/benchmarks/base.js b/deps/v8/benchmarks/base.js
index 261733208..67cddd205 100644
--- a/deps/v8/benchmarks/base.js
+++ b/deps/v8/benchmarks/base.js
@@ -31,10 +31,15 @@
// A benchmark has a name (string) and a function that will be run to
-// do the performance measurement.
-function Benchmark(name, run) {
+// do the performance measurement. The optional setup and tearDown
+// arguments are functions that will be invoked before and after
+// running the benchmark, but the running time of these functions will
+// not be accounted for in the benchmark score.
+function Benchmark(name, run, setup, tearDown) {
this.name = name;
this.run = run;
+ this.Setup = setup ? setup : function() { };
+ this.TearDown = tearDown ? tearDown : function() { };
}
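
With the extended constructor, a suite definition might look as follows; the benchmark body and reference score are hypothetical, and only the four-argument signature comes from this patch. Time spent in the setup and tearDown functions does not count toward the score:

  var data;

  new BenchmarkSuite('Example', 100000, [
    new Benchmark('Sum', function() {            // measured
      var sum = 0;
      for (var i = 0; i < data.length; i++) sum += data[i];
      return sum;
    }, function() {                              // setup (untimed)
      data = new Array(10000);
      for (var i = 0; i < data.length; i++) data[i] = i;
    }, function() {                              // tearDown (untimed)
      data = null;
    })
  ]);
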
@@ -73,7 +78,7 @@ BenchmarkSuite.suites = [];
// Scores are not comparable across versions. Bump the version if
// you're making changes that will affect the scores, e.g. if you add
// a new benchmark or change an existing one.
-BenchmarkSuite.version = '3';
+BenchmarkSuite.version = '5';
// To make the benchmark results predictable, we replace Math.random
@@ -86,7 +91,6 @@ Math.random = (function() {
seed = ((seed ^ 0xc761c23c) ^ (seed >>> 19)) & 0xffffffff;
seed = ((seed + 0x165667b1) + (seed << 5)) & 0xffffffff;
seed = ((seed + 0xd3a2646c) ^ (seed << 9)) & 0xffffffff;
- seed = ((seed + 0xd3a2646c) ^ (seed << 9)) & 0xffffffff;
seed = ((seed + 0xfd7046c5) + (seed << 3)) & 0xffffffff;
seed = ((seed ^ 0xb55a4f09) ^ (seed >>> 16)) & 0xffffffff;
return (seed & 0xfffffff) / 0x10000000;
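
For context, the surrounding replacement function is a seeded version of Robert Jenkins' 32-bit integer hash; after this change, each mixing line runs exactly once per call. A reconstruction of the whole function follows; the hunk shows only the middle, so the seed value and first mixing line are best-effort and should be checked against base.js:

  Math.random = (function() {
    var seed = 49734321;
    return function() {
      // Robert Jenkins' 32 bit integer hash function.
      seed = ((seed + 0x7ed55d16) + (seed << 12))  & 0xffffffff;
      seed = ((seed ^ 0xc761c23c) ^ (seed >>> 19)) & 0xffffffff;
      seed = ((seed + 0x165667b1) + (seed << 5))   & 0xffffffff;
      seed = ((seed + 0xd3a2646c) ^ (seed << 9))   & 0xffffffff;
      seed = ((seed + 0xfd7046c5) + (seed << 3))   & 0xffffffff;
      seed = ((seed ^ 0xb55a4f09) ^ (seed >>> 16)) & 0xffffffff;
      return (seed & 0xfffffff) / 0x10000000;
    };
  })();
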
@@ -114,7 +118,7 @@ BenchmarkSuite.RunSuites = function(runner) {
continuation = suite.RunStep(runner);
}
if (continuation && typeof window != 'undefined' && window.setTimeout) {
- window.setTimeout(RunStep, 100);
+ window.setTimeout(RunStep, 25);
return;
}
}
@@ -194,7 +198,7 @@ BenchmarkSuite.prototype.NotifyError = function(error) {
// Runs a single benchmark for at least a second and computes the
// average time it takes to run a single iteration.
-BenchmarkSuite.prototype.RunSingle = function(benchmark) {
+BenchmarkSuite.prototype.RunSingleBenchmark = function(benchmark) {
var elapsed = 0;
var start = new Date();
for (var n = 0; elapsed < 1000; n++) {
@@ -216,18 +220,45 @@ BenchmarkSuite.prototype.RunStep = function(runner) {
var length = this.benchmarks.length;
var index = 0;
var suite = this;
- function RunNext() {
+
+ // Run the setup, the actual benchmark, and the tear down in three
+ // separate steps to allow the framework to yield between any of the
+ // steps.
+
+ function RunNextSetup() {
if (index < length) {
try {
- suite.RunSingle(suite.benchmarks[index++]);
+ suite.benchmarks[index].Setup();
} catch (e) {
suite.NotifyError(e);
return null;
}
- return RunNext;
+ return RunNextBenchmark;
}
suite.NotifyResult();
return null;
}
- return RunNext();
+
+ function RunNextBenchmark() {
+ try {
+ suite.RunSingleBenchmark(suite.benchmarks[index]);
+ } catch (e) {
+ suite.NotifyError(e);
+ return null;
+ }
+ return RunNextTearDown;
+ }
+
+ function RunNextTearDown() {
+ try {
+ suite.benchmarks[index++].TearDown();
+ } catch (e) {
+ suite.NotifyError(e);
+ return null;
+ }
+ return RunNextSetup;
+ }
+
+ // Start out running the setup.
+ return RunNextSetup();
}
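
The three Run* functions above form a simple trampoline: each returns the next step to run, or null once the suite has finished, so the framework can yield to the host between the untimed phases. A minimal driver under that contract (RunSuites in base.js does the same, but interleaves window.setTimeout between steps when running in a browser):

  function DriveSuite(suite, runner) {
    var step = suite.RunStep(runner);  // first step: the setup
    while (step != null) {
      step = step();                   // setup -> benchmark -> tearDown -> ...
    }
  }
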
diff --git a/deps/v8/benchmarks/deltablue.js b/deps/v8/benchmarks/deltablue.js
index b51afd1d2..253046f80 100644
--- a/deps/v8/benchmarks/deltablue.js
+++ b/deps/v8/benchmarks/deltablue.js
@@ -16,10 +16,10 @@
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-// This implementation of the DeltaBlue benchmark is derived
-// from the Smalltalk implementation by John Maloney and Mario
-// Wolczko. Some parts have been translated directly, whereas
-// others have been modified more aggresively to make it feel
+// This implementation of the DeltaBlue benchmark is derived
+// from the Smalltalk implementation by John Maloney and Mario
+// Wolczko. Some parts have been translated directly, whereas
+// others have been modified more aggressively to make it feel
// more like a JavaScript program.
diff --git a/deps/v8/benchmarks/earley-boyer.js b/deps/v8/benchmarks/earley-boyer.js
index 9016a137a..3c7f922c4 100644
--- a/deps/v8/benchmarks/earley-boyer.js
+++ b/deps/v8/benchmarks/earley-boyer.js
@@ -4682,4 +4682,3 @@ function RunBenchmark(name, count, run, warn) {
}
var BgL_runzd2benchmarkzd2 = RunBenchmark;
-
diff --git a/deps/v8/benchmarks/raytrace.js b/deps/v8/benchmarks/raytrace.js
index 925d0ed35..c68b0383a 100644
--- a/deps/v8/benchmarks/raytrace.js
+++ b/deps/v8/benchmarks/raytrace.js
@@ -5,7 +5,7 @@
//
// It has been modified slightly by Google to work as a standalone
// benchmark, but all the computational code remains
-// untouched. This file also contains a copy of the Prototype
+// untouched. This file also contains a copy of parts of the Prototype
// JavaScript framework which is used by the ray tracer.
var RayTrace = new BenchmarkSuite('RayTrace', 932666, [
@@ -13,39 +13,22 @@ var RayTrace = new BenchmarkSuite('RayTrace', 932666, [
]);
+// Variable used to hold a number that can be used to verify that
+// the scene was ray traced correctly.
var checkNumber;
-// Create dummy objects if we're not running in a browser.
-if (typeof document == 'undefined') {
- document = { };
- window = { opera: null };
- navigator = { userAgent: null, appVersion: "" };
-}
-
// ------------------------------------------------------------------------
// ------------------------------------------------------------------------
+// The following is a copy of parts of the Prototype JavaScript library:
+
+// Prototype JavaScript framework, version 1.5.0
+// (c) 2005-2007 Sam Stephenson
+//
+// Prototype is freely distributable under the terms of an MIT-style license.
+// For details, see the Prototype web site: http://prototype.conio.net/
-/* Prototype JavaScript framework, version 1.5.0
- * (c) 2005-2007 Sam Stephenson
- *
- * Prototype is freely distributable under the terms of an MIT-style license.
- * For details, see the Prototype web site: http://prototype.conio.net/
- *
-/*--------------------------------------------------------------------------*/
-
-//--------------------
-var Prototype = {
- Version: '1.5.0',
- BrowserFeatures: {
- XPath: !!document.evaluate
- },
-
- ScriptFragment: '(?:<script.*?>)((\n|\r|.)*?)(?:<\/script>)',
- emptyFunction: function() {},
- K: function(x) { return x }
-}
var Class = {
create: function() {
@@ -53,2497 +36,16 @@ var Class = {
this.initialize.apply(this, arguments);
}
}
-}
+};
-var Abstract = new Object();
Object.extend = function(destination, source) {
for (var property in source) {
destination[property] = source[property];
}
return destination;
-}
-
-Object.extend(Object, {
- inspect: function(object) {
- try {
- if (object === undefined) return 'undefined';
- if (object === null) return 'null';
- return object.inspect ? object.inspect() : object.toString();
- } catch (e) {
- if (e instanceof RangeError) return '...';
- throw e;
- }
- },
-
- keys: function(object) {
- var keys = [];
- for (var property in object)
- keys.push(property);
- return keys;
- },
-
- values: function(object) {
- var values = [];
- for (var property in object)
- values.push(object[property]);
- return values;
- },
-
- clone: function(object) {
- return Object.extend({}, object);
- }
-});
-
-Function.prototype.bind = function() {
- var __method = this, args = $A(arguments), object = args.shift();
- return function() {
- return __method.apply(object, args.concat($A(arguments)));
- }
-}
-
-Function.prototype.bindAsEventListener = function(object) {
- var __method = this, args = $A(arguments), object = args.shift();
- return function(event) {
- return __method.apply(object, [( event || window.event)].concat(args).concat($A(arguments)));
- }
-}
-
-Object.extend(Number.prototype, {
- toColorPart: function() {
- var digits = this.toString(16);
- if (this < 16) return '0' + digits;
- return digits;
- },
-
- succ: function() {
- return this + 1;
- },
-
- times: function(iterator) {
- $R(0, this, true).each(iterator);
- return this;
- }
-});
-
-var Try = {
- these: function() {
- var returnValue;
-
- for (var i = 0, length = arguments.length; i < length; i++) {
- var lambda = arguments[i];
- try {
- returnValue = lambda();
- break;
- } catch (e) {}
- }
-
- return returnValue;
- }
-}
-
-/*--------------------------------------------------------------------------*/
-
-var PeriodicalExecuter = Class.create();
-PeriodicalExecuter.prototype = {
- initialize: function(callback, frequency) {
- this.callback = callback;
- this.frequency = frequency;
- this.currentlyExecuting = false;
-
- this.registerCallback();
- },
-
- registerCallback: function() {
- this.timer = setInterval(this.onTimerEvent.bind(this), this.frequency * 1000);
- },
-
- stop: function() {
- if (!this.timer) return;
- clearInterval(this.timer);
- this.timer = null;
- },
-
- onTimerEvent: function() {
- if (!this.currentlyExecuting) {
- try {
- this.currentlyExecuting = true;
- this.callback(this);
- } finally {
- this.currentlyExecuting = false;
- }
- }
- }
-}
-String.interpret = function(value){
- return value == null ? '' : String(value);
-}
-
-Object.extend(String.prototype, {
- gsub: function(pattern, replacement) {
- var result = '', source = this, match;
- replacement = arguments.callee.prepareReplacement(replacement);
-
- while (source.length > 0) {
- if (match = source.match(pattern)) {
- result += source.slice(0, match.index);
- result += String.interpret(replacement(match));
- source = source.slice(match.index + match[0].length);
- } else {
- result += source, source = '';
- }
- }
- return result;
- },
-
- sub: function(pattern, replacement, count) {
- replacement = this.gsub.prepareReplacement(replacement);
- count = count === undefined ? 1 : count;
-
- return this.gsub(pattern, function(match) {
- if (--count < 0) return match[0];
- return replacement(match);
- });
- },
-
- scan: function(pattern, iterator) {
- this.gsub(pattern, iterator);
- return this;
- },
-
- truncate: function(length, truncation) {
- length = length || 30;
- truncation = truncation === undefined ? '...' : truncation;
- return this.length > length ?
- this.slice(0, length - truncation.length) + truncation : this;
- },
-
- strip: function() {
- return this.replace(/^\s+/, '').replace(/\s+$/, '');
- },
-
- stripTags: function() {
- return this.replace(/<\/?[^>]+>/gi, '');
- },
-
- stripScripts: function() {
- return this.replace(new RegExp(Prototype.ScriptFragment, 'img'), '');
- },
-
- extractScripts: function() {
- var matchAll = new RegExp(Prototype.ScriptFragment, 'img');
- var matchOne = new RegExp(Prototype.ScriptFragment, 'im');
- return (this.match(matchAll) || []).map(function(scriptTag) {
- return (scriptTag.match(matchOne) || ['', ''])[1];
- });
- },
-
- evalScripts: function() {
- return this.extractScripts().map(function(script) { return eval(script) });
- },
-
- escapeHTML: function() {
- var div = document.createElement('div');
- var text = document.createTextNode(this);
- div.appendChild(text);
- return div.innerHTML;
- },
-
- unescapeHTML: function() {
- var div = document.createElement('div');
- div.innerHTML = this.stripTags();
- return div.childNodes[0] ? (div.childNodes.length > 1 ?
- $A(div.childNodes).inject('',function(memo,node){ return memo+node.nodeValue }) :
- div.childNodes[0].nodeValue) : '';
- },
-
- toQueryParams: function(separator) {
- var match = this.strip().match(/([^?#]*)(#.*)?$/);
- if (!match) return {};
-
- return match[1].split(separator || '&').inject({}, function(hash, pair) {
- if ((pair = pair.split('='))[0]) {
- var name = decodeURIComponent(pair[0]);
- var value = pair[1] ? decodeURIComponent(pair[1]) : undefined;
-
- if (hash[name] !== undefined) {
- if (hash[name].constructor != Array)
- hash[name] = [hash[name]];
- if (value) hash[name].push(value);
- }
- else hash[name] = value;
- }
- return hash;
- });
- },
-
- toArray: function() {
- return this.split('');
- },
-
- succ: function() {
- return this.slice(0, this.length - 1) +
- String.fromCharCode(this.charCodeAt(this.length - 1) + 1);
- },
-
- camelize: function() {
- var parts = this.split('-'), len = parts.length;
- if (len == 1) return parts[0];
-
- var camelized = this.charAt(0) == '-'
- ? parts[0].charAt(0).toUpperCase() + parts[0].substring(1)
- : parts[0];
-
- for (var i = 1; i < len; i++)
- camelized += parts[i].charAt(0).toUpperCase() + parts[i].substring(1);
-
- return camelized;
- },
-
- capitalize: function(){
- return this.charAt(0).toUpperCase() + this.substring(1).toLowerCase();
- },
-
- underscore: function() {
- return this.gsub(/::/, '/').gsub(/([A-Z]+)([A-Z][a-z])/,'#{1}_#{2}').gsub(/([a-z\d])([A-Z])/,'#{1}_#{2}').gsub(/-/,'_').toLowerCase();
- },
-
- dasherize: function() {
- return this.gsub(/_/,'-');
- },
-
- inspect: function(useDoubleQuotes) {
- var escapedString = this.replace(/\\/g, '\\\\');
- if (useDoubleQuotes)
- return '"' + escapedString.replace(/"/g, '\\"') + '"';
- else
- return "'" + escapedString.replace(/'/g, '\\\'') + "'";
- }
-});
-
-String.prototype.gsub.prepareReplacement = function(replacement) {
- if (typeof replacement == 'function') return replacement;
- var template = new Template(replacement);
- return function(match) { return template.evaluate(match) };
-}
-
-String.prototype.parseQuery = String.prototype.toQueryParams;
-
-var Template = Class.create();
-Template.Pattern = /(^|.|\r|\n)(#\{(.*?)\})/;
-Template.prototype = {
- initialize: function(template, pattern) {
- this.template = template.toString();
- this.pattern = pattern || Template.Pattern;
- },
-
- evaluate: function(object) {
- return this.template.gsub(this.pattern, function(match) {
- var before = match[1];
- if (before == '\\') return match[2];
- return before + String.interpret(object[match[3]]);
- });
- }
-}
-
-var $break = new Object();
-var $continue = new Object();
-
-var Enumerable = {
- each: function(iterator) {
- var index = 0;
- try {
- this._each(function(value) {
- try {
- iterator(value, index++);
- } catch (e) {
- if (e != $continue) throw e;
- }
- });
- } catch (e) {
- if (e != $break) throw e;
- }
- return this;
- },
-
- eachSlice: function(number, iterator) {
- var index = -number, slices = [], array = this.toArray();
- while ((index += number) < array.length)
- slices.push(array.slice(index, index+number));
- return slices.map(iterator);
- },
-
- all: function(iterator) {
- var result = true;
- this.each(function(value, index) {
- result = result && !!(iterator || Prototype.K)(value, index);
- if (!result) throw $break;
- });
- return result;
- },
-
- any: function(iterator) {
- var result = false;
- this.each(function(value, index) {
- if (result = !!(iterator || Prototype.K)(value, index))
- throw $break;
- });
- return result;
- },
-
- collect: function(iterator) {
- var results = [];
- this.each(function(value, index) {
- results.push((iterator || Prototype.K)(value, index));
- });
- return results;
- },
-
- detect: function(iterator) {
- var result;
- this.each(function(value, index) {
- if (iterator(value, index)) {
- result = value;
- throw $break;
- }
- });
- return result;
- },
-
- findAll: function(iterator) {
- var results = [];
- this.each(function(value, index) {
- if (iterator(value, index))
- results.push(value);
- });
- return results;
- },
-
- grep: function(pattern, iterator) {
- var results = [];
- this.each(function(value, index) {
- var stringValue = value.toString();
- if (stringValue.match(pattern))
- results.push((iterator || Prototype.K)(value, index));
- })
- return results;
- },
-
- include: function(object) {
- var found = false;
- this.each(function(value) {
- if (value == object) {
- found = true;
- throw $break;
- }
- });
- return found;
- },
-
- inGroupsOf: function(number, fillWith) {
- fillWith = fillWith === undefined ? null : fillWith;
- return this.eachSlice(number, function(slice) {
- while(slice.length < number) slice.push(fillWith);
- return slice;
- });
- },
-
- inject: function(memo, iterator) {
- this.each(function(value, index) {
- memo = iterator(memo, value, index);
- });
- return memo;
- },
-
- invoke: function(method) {
- var args = $A(arguments).slice(1);
- return this.map(function(value) {
- return value[method].apply(value, args);
- });
- },
-
- max: function(iterator) {
- var result;
- this.each(function(value, index) {
- value = (iterator || Prototype.K)(value, index);
- if (result == undefined || value >= result)
- result = value;
- });
- return result;
- },
-
- min: function(iterator) {
- var result;
- this.each(function(value, index) {
- value = (iterator || Prototype.K)(value, index);
- if (result == undefined || value < result)
- result = value;
- });
- return result;
- },
-
- partition: function(iterator) {
- var trues = [], falses = [];
- this.each(function(value, index) {
- ((iterator || Prototype.K)(value, index) ?
- trues : falses).push(value);
- });
- return [trues, falses];
- },
-
- pluck: function(property) {
- var results = [];
- this.each(function(value, index) {
- results.push(value[property]);
- });
- return results;
- },
-
- reject: function(iterator) {
- var results = [];
- this.each(function(value, index) {
- if (!iterator(value, index))
- results.push(value);
- });
- return results;
- },
-
- sortBy: function(iterator) {
- return this.map(function(value, index) {
- return {value: value, criteria: iterator(value, index)};
- }).sort(function(left, right) {
- var a = left.criteria, b = right.criteria;
- return a < b ? -1 : a > b ? 1 : 0;
- }).pluck('value');
- },
-
- toArray: function() {
- return this.map();
- },
-
- zip: function() {
- var iterator = Prototype.K, args = $A(arguments);
- if (typeof args.last() == 'function')
- iterator = args.pop();
-
- var collections = [this].concat(args).map($A);
- return this.map(function(value, index) {
- return iterator(collections.pluck(index));
- });
- },
-
- size: function() {
- return this.toArray().length;
- },
-
- inspect: function() {
- return '#<Enumerable:' + this.toArray().inspect() + '>';
- }
-}
-
-Object.extend(Enumerable, {
- map: Enumerable.collect,
- find: Enumerable.detect,
- select: Enumerable.findAll,
- member: Enumerable.include,
- entries: Enumerable.toArray
-});
-var $A = Array.from = function(iterable) {
- if (!iterable) return [];
- if (iterable.toArray) {
- return iterable.toArray();
- } else {
- var results = [];
- for (var i = 0, length = iterable.length; i < length; i++)
- results.push(iterable[i]);
- return results;
- }
-}
-
-Object.extend(Array.prototype, Enumerable);
-
-if (!Array.prototype._reverse)
- Array.prototype._reverse = Array.prototype.reverse;
-
-Object.extend(Array.prototype, {
- _each: function(iterator) {
- for (var i = 0, length = this.length; i < length; i++)
- iterator(this[i]);
- },
-
- clear: function() {
- this.length = 0;
- return this;
- },
-
- first: function() {
- return this[0];
- },
-
- last: function() {
- return this[this.length - 1];
- },
-
- compact: function() {
- return this.select(function(value) {
- return value != null;
- });
- },
-
- flatten: function() {
- return this.inject([], function(array, value) {
- return array.concat(value && value.constructor == Array ?
- value.flatten() : [value]);
- });
- },
-
- without: function() {
- var values = $A(arguments);
- return this.select(function(value) {
- return !values.include(value);
- });
- },
-
- indexOf: function(object) {
- for (var i = 0, length = this.length; i < length; i++)
- if (this[i] == object) return i;
- return -1;
- },
-
- reverse: function(inline) {
- return (inline !== false ? this : this.toArray())._reverse();
- },
-
- reduce: function() {
- return this.length > 1 ? this : this[0];
- },
-
- uniq: function() {
- return this.inject([], function(array, value) {
- return array.include(value) ? array : array.concat([value]);
- });
- },
-
- clone: function() {
- return [].concat(this);
- },
-
- size: function() {
- return this.length;
- },
-
- inspect: function() {
- return '[' + this.map(Object.inspect).join(', ') + ']';
- }
-});
-
-Array.prototype.toArray = Array.prototype.clone;
-
-function $w(string){
- string = string.strip();
- return string ? string.split(/\s+/) : [];
-}
-
-if(window.opera){
- Array.prototype.concat = function(){
- var array = [];
- for(var i = 0, length = this.length; i < length; i++) array.push(this[i]);
- for(var i = 0, length = arguments.length; i < length; i++) {
- if(arguments[i].constructor == Array) {
- for(var j = 0, arrayLength = arguments[i].length; j < arrayLength; j++)
- array.push(arguments[i][j]);
- } else {
- array.push(arguments[i]);
- }
- }
- return array;
- }
-}
-var Hash = function(obj) {
- Object.extend(this, obj || {});
-};
-
-Object.extend(Hash, {
- toQueryString: function(obj) {
- var parts = [];
-
- this.prototype._each.call(obj, function(pair) {
- if (!pair.key) return;
-
- if (pair.value && pair.value.constructor == Array) {
- var values = pair.value.compact();
- if (values.length < 2) pair.value = values.reduce();
- else {
- key = encodeURIComponent(pair.key);
- values.each(function(value) {
- value = value != undefined ? encodeURIComponent(value) : '';
- parts.push(key + '=' + encodeURIComponent(value));
- });
- return;
- }
- }
- if (pair.value == undefined) pair[1] = '';
- parts.push(pair.map(encodeURIComponent).join('='));
- });
-
- return parts.join('&');
- }
-});
-
-Object.extend(Hash.prototype, Enumerable);
-Object.extend(Hash.prototype, {
- _each: function(iterator) {
- for (var key in this) {
- var value = this[key];
- if (value && value == Hash.prototype[key]) continue;
-
- var pair = [key, value];
- pair.key = key;
- pair.value = value;
- iterator(pair);
- }
- },
-
- keys: function() {
- return this.pluck('key');
- },
-
- values: function() {
- return this.pluck('value');
- },
-
- merge: function(hash) {
- return $H(hash).inject(this, function(mergedHash, pair) {
- mergedHash[pair.key] = pair.value;
- return mergedHash;
- });
- },
-
- remove: function() {
- var result;
- for(var i = 0, length = arguments.length; i < length; i++) {
- var value = this[arguments[i]];
- if (value !== undefined){
- if (result === undefined) result = value;
- else {
- if (result.constructor != Array) result = [result];
- result.push(value)
- }
- }
- delete this[arguments[i]];
- }
- return result;
- },
-
- toQueryString: function() {
- return Hash.toQueryString(this);
- },
-
- inspect: function() {
- return '#<Hash:{' + this.map(function(pair) {
- return pair.map(Object.inspect).join(': ');
- }).join(', ') + '}>';
- }
-});
-
-function $H(object) {
- if (object && object.constructor == Hash) return object;
- return new Hash(object);
-};
-ObjectRange = Class.create();
-Object.extend(ObjectRange.prototype, Enumerable);
-Object.extend(ObjectRange.prototype, {
- initialize: function(start, end, exclusive) {
- this.start = start;
- this.end = end;
- this.exclusive = exclusive;
- },
-
- _each: function(iterator) {
- var value = this.start;
- while (this.include(value)) {
- iterator(value);
- value = value.succ();
- }
- },
-
- include: function(value) {
- if (value < this.start)
- return false;
- if (this.exclusive)
- return value < this.end;
- return value <= this.end;
- }
-});
-
-var $R = function(start, end, exclusive) {
- return new ObjectRange(start, end, exclusive);
-}
-
-var Ajax = {
- getTransport: function() {
- return Try.these(
- function() {return new XMLHttpRequest()},
- function() {return new ActiveXObject('Msxml2.XMLHTTP')},
- function() {return new ActiveXObject('Microsoft.XMLHTTP')}
- ) || false;
- },
-
- activeRequestCount: 0
-}
-
-Ajax.Responders = {
- responders: [],
-
- _each: function(iterator) {
- this.responders._each(iterator);
- },
-
- register: function(responder) {
- if (!this.include(responder))
- this.responders.push(responder);
- },
-
- unregister: function(responder) {
- this.responders = this.responders.without(responder);
- },
-
- dispatch: function(callback, request, transport, json) {
- this.each(function(responder) {
- if (typeof responder[callback] == 'function') {
- try {
- responder[callback].apply(responder, [request, transport, json]);
- } catch (e) {}
- }
- });
- }
};
-Object.extend(Ajax.Responders, Enumerable);
-
-Ajax.Responders.register({
- onCreate: function() {
- Ajax.activeRequestCount++;
- },
- onComplete: function() {
- Ajax.activeRequestCount--;
- }
-});
-
-Ajax.Base = function() {};
-Ajax.Base.prototype = {
- setOptions: function(options) {
- this.options = {
- method: 'post',
- asynchronous: true,
- contentType: 'application/x-www-form-urlencoded',
- encoding: 'UTF-8',
- parameters: ''
- }
- Object.extend(this.options, options || {});
-
- this.options.method = this.options.method.toLowerCase();
- if (typeof this.options.parameters == 'string')
- this.options.parameters = this.options.parameters.toQueryParams();
- }
-}
-
-Ajax.Request = Class.create();
-Ajax.Request.Events =
- ['Uninitialized', 'Loading', 'Loaded', 'Interactive', 'Complete'];
-
-Ajax.Request.prototype = Object.extend(new Ajax.Base(), {
- _complete: false,
-
- initialize: function(url, options) {
- this.transport = Ajax.getTransport();
- this.setOptions(options);
- this.request(url);
- },
-
- request: function(url) {
- this.url = url;
- this.method = this.options.method;
- var params = this.options.parameters;
-
- if (!['get', 'post'].include(this.method)) {
- // simulate other verbs over post
- params['_method'] = this.method;
- this.method = 'post';
- }
-
- params = Hash.toQueryString(params);
- if (params && /Konqueror|Safari|KHTML/.test(navigator.userAgent)) params += '&_='
-
- // when GET, append parameters to URL
- if (this.method == 'get' && params)
- this.url += (this.url.indexOf('?') > -1 ? '&' : '?') + params;
-
- try {
- Ajax.Responders.dispatch('onCreate', this, this.transport);
-
- this.transport.open(this.method.toUpperCase(), this.url,
- this.options.asynchronous);
-
- if (this.options.asynchronous)
- setTimeout(function() { this.respondToReadyState(1) }.bind(this), 10);
-
- this.transport.onreadystatechange = this.onStateChange.bind(this);
- this.setRequestHeaders();
-
- var body = this.method == 'post' ? (this.options.postBody || params) : null;
-
- this.transport.send(body);
-
- /* Force Firefox to handle ready state 4 for synchronous requests */
- if (!this.options.asynchronous && this.transport.overrideMimeType)
- this.onStateChange();
-
- }
- catch (e) {
- this.dispatchException(e);
- }
- },
-
- onStateChange: function() {
- var readyState = this.transport.readyState;
- if (readyState > 1 && !((readyState == 4) && this._complete))
- this.respondToReadyState(this.transport.readyState);
- },
-
- setRequestHeaders: function() {
- var headers = {
- 'X-Requested-With': 'XMLHttpRequest',
- 'X-Prototype-Version': Prototype.Version,
- 'Accept': 'text/javascript, text/html, application/xml, text/xml, */*'
- };
-
- if (this.method == 'post') {
- headers['Content-type'] = this.options.contentType +
- (this.options.encoding ? '; charset=' + this.options.encoding : '');
-
- /* Force "Connection: close" for older Mozilla browsers to work
- * around a bug where XMLHttpRequest sends an incorrect
- * Content-length header. See Mozilla Bugzilla #246651.
- */
- if (this.transport.overrideMimeType &&
- (navigator.userAgent.match(/Gecko\/(\d{4})/) || [0,2005])[1] < 2005)
- headers['Connection'] = 'close';
- }
-
- // user-defined headers
- if (typeof this.options.requestHeaders == 'object') {
- var extras = this.options.requestHeaders;
-
- if (typeof extras.push == 'function')
- for (var i = 0, length = extras.length; i < length; i += 2)
- headers[extras[i]] = extras[i+1];
- else
- $H(extras).each(function(pair) { headers[pair.key] = pair.value });
- }
-
- for (var name in headers)
- this.transport.setRequestHeader(name, headers[name]);
- },
-
- success: function() {
- return !this.transport.status
- || (this.transport.status >= 200 && this.transport.status < 300);
- },
-
- respondToReadyState: function(readyState) {
- var state = Ajax.Request.Events[readyState];
- var transport = this.transport, json = this.evalJSON();
-
- if (state == 'Complete') {
- try {
- this._complete = true;
- (this.options['on' + this.transport.status]
- || this.options['on' + (this.success() ? 'Success' : 'Failure')]
- || Prototype.emptyFunction)(transport, json);
- } catch (e) {
- this.dispatchException(e);
- }
-
- if ((this.getHeader('Content-type') || 'text/javascript').strip().
- match(/^(text|application)\/(x-)?(java|ecma)script(;.*)?$/i))
- this.evalResponse();
- }
-
- try {
- (this.options['on' + state] || Prototype.emptyFunction)(transport, json);
- Ajax.Responders.dispatch('on' + state, this, transport, json);
- } catch (e) {
- this.dispatchException(e);
- }
-
- if (state == 'Complete') {
- // avoid memory leak in MSIE: clean up
- this.transport.onreadystatechange = Prototype.emptyFunction;
- }
- },
-
- getHeader: function(name) {
- try {
- return this.transport.getResponseHeader(name);
- } catch (e) { return null }
- },
-
- evalJSON: function() {
- try {
- var json = this.getHeader('X-JSON');
- return json ? eval('(' + json + ')') : null;
- } catch (e) { return null }
- },
-
- evalResponse: function() {
- try {
- return eval(this.transport.responseText);
- } catch (e) {
- this.dispatchException(e);
- }
- },
-
- dispatchException: function(exception) {
- (this.options.onException || Prototype.emptyFunction)(this, exception);
- Ajax.Responders.dispatch('onException', this, exception);
- }
-});
-
-Ajax.Updater = Class.create();
-
-Object.extend(Object.extend(Ajax.Updater.prototype, Ajax.Request.prototype), {
- initialize: function(container, url, options) {
- this.container = {
- success: (container.success || container),
- failure: (container.failure || (container.success ? null : container))
- }
-
- this.transport = Ajax.getTransport();
- this.setOptions(options);
-
- var onComplete = this.options.onComplete || Prototype.emptyFunction;
- this.options.onComplete = (function(transport, param) {
- this.updateContent();
- onComplete(transport, param);
- }).bind(this);
-
- this.request(url);
- },
-
- updateContent: function() {
- var receiver = this.container[this.success() ? 'success' : 'failure'];
- var response = this.transport.responseText;
-
- if (!this.options.evalScripts) response = response.stripScripts();
-
- if (receiver = $(receiver)) {
- if (this.options.insertion)
- new this.options.insertion(receiver, response);
- else
- receiver.update(response);
- }
-
- if (this.success()) {
- if (this.onComplete)
- setTimeout(this.onComplete.bind(this), 10);
- }
- }
-});
-
-Ajax.PeriodicalUpdater = Class.create();
-Ajax.PeriodicalUpdater.prototype = Object.extend(new Ajax.Base(), {
- initialize: function(container, url, options) {
- this.setOptions(options);
- this.onComplete = this.options.onComplete;
-
- this.frequency = (this.options.frequency || 2);
- this.decay = (this.options.decay || 1);
-
- this.updater = {};
- this.container = container;
- this.url = url;
-
- this.start();
- },
-
- start: function() {
- this.options.onComplete = this.updateComplete.bind(this);
- this.onTimerEvent();
- },
-
- stop: function() {
- this.updater.options.onComplete = undefined;
- clearTimeout(this.timer);
- (this.onComplete || Prototype.emptyFunction).apply(this, arguments);
- },
-
- updateComplete: function(request) {
- if (this.options.decay) {
- this.decay = (request.responseText == this.lastText ?
- this.decay * this.options.decay : 1);
-
- this.lastText = request.responseText;
- }
- this.timer = setTimeout(this.onTimerEvent.bind(this),
- this.decay * this.frequency * 1000);
- },
-
- onTimerEvent: function() {
- this.updater = new Ajax.Updater(this.container, this.url, this.options);
- }
-});
-function $(element) {
- if (arguments.length > 1) {
- for (var i = 0, elements = [], length = arguments.length; i < length; i++)
- elements.push($(arguments[i]));
- return elements;
- }
- if (typeof element == 'string')
- element = document.getElementById(element);
- return Element.extend(element);
-}
-
-if (Prototype.BrowserFeatures.XPath) {
- document._getElementsByXPath = function(expression, parentElement) {
- var results = [];
- var query = document.evaluate(expression, $(parentElement) || document,
- null, XPathResult.ORDERED_NODE_SNAPSHOT_TYPE, null);
- for (var i = 0, length = query.snapshotLength; i < length; i++)
- results.push(query.snapshotItem(i));
- return results;
- };
-}
-
-document.getElementsByClassName = function(className, parentElement) {
- if (Prototype.BrowserFeatures.XPath) {
- var q = ".//*[contains(concat(' ', @class, ' '), ' " + className + " ')]";
- return document._getElementsByXPath(q, parentElement);
- } else {
- var children = ($(parentElement) || document.body).getElementsByTagName('*');
- var elements = [], child;
- for (var i = 0, length = children.length; i < length; i++) {
- child = children[i];
- if (Element.hasClassName(child, className))
- elements.push(Element.extend(child));
- }
- return elements;
- }
-};
-
-/*--------------------------------------------------------------------------*/
-
-if (!window.Element)
- var Element = new Object();
-
-Element.extend = function(element) {
- if (!element || _nativeExtensions || element.nodeType == 3) return element;
-
- if (!element._extended && element.tagName && element != window) {
- var methods = Object.clone(Element.Methods), cache = Element.extend.cache;
-
- if (element.tagName == 'FORM')
- Object.extend(methods, Form.Methods);
- if (['INPUT', 'TEXTAREA', 'SELECT'].include(element.tagName))
- Object.extend(methods, Form.Element.Methods);
-
- Object.extend(methods, Element.Methods.Simulated);
-
- for (var property in methods) {
- var value = methods[property];
- if (typeof value == 'function' && !(property in element))
- element[property] = cache.findOrStore(value);
- }
- }
-
- element._extended = true;
- return element;
-};
-
-Element.extend.cache = {
- findOrStore: function(value) {
- return this[value] = this[value] || function() {
- return value.apply(null, [this].concat($A(arguments)));
- }
- }
-};
-
-Element.Methods = {
- visible: function(element) {
- return $(element).style.display != 'none';
- },
-
- toggle: function(element) {
- element = $(element);
- Element[Element.visible(element) ? 'hide' : 'show'](element);
- return element;
- },
-
- hide: function(element) {
- $(element).style.display = 'none';
- return element;
- },
-
- show: function(element) {
- $(element).style.display = '';
- return element;
- },
-
- remove: function(element) {
- element = $(element);
- element.parentNode.removeChild(element);
- return element;
- },
-
- update: function(element, html) {
- html = typeof html == 'undefined' ? '' : html.toString();
- $(element).innerHTML = html.stripScripts();
- setTimeout(function() {html.evalScripts()}, 10);
- return element;
- },
-
- replace: function(element, html) {
- element = $(element);
- html = typeof html == 'undefined' ? '' : html.toString();
- if (element.outerHTML) {
- element.outerHTML = html.stripScripts();
- } else {
- var range = element.ownerDocument.createRange();
- range.selectNodeContents(element);
- element.parentNode.replaceChild(
- range.createContextualFragment(html.stripScripts()), element);
- }
- setTimeout(function() {html.evalScripts()}, 10);
- return element;
- },
-
- inspect: function(element) {
- element = $(element);
- var result = '<' + element.tagName.toLowerCase();
- $H({'id': 'id', 'className': 'class'}).each(function(pair) {
- var property = pair.first(), attribute = pair.last();
- var value = (element[property] || '').toString();
- if (value) result += ' ' + attribute + '=' + value.inspect(true);
- });
- return result + '>';
- },
-
- recursivelyCollect: function(element, property) {
- element = $(element);
- var elements = [];
- while (element = element[property])
- if (element.nodeType == 1)
- elements.push(Element.extend(element));
- return elements;
- },
-
- ancestors: function(element) {
- return $(element).recursivelyCollect('parentNode');
- },
-
- descendants: function(element) {
- return $A($(element).getElementsByTagName('*'));
- },
-
- immediateDescendants: function(element) {
- if (!(element = $(element).firstChild)) return [];
- while (element && element.nodeType != 1) element = element.nextSibling;
- if (element) return [element].concat($(element).nextSiblings());
- return [];
- },
-
- previousSiblings: function(element) {
- return $(element).recursivelyCollect('previousSibling');
- },
-
- nextSiblings: function(element) {
- return $(element).recursivelyCollect('nextSibling');
- },
-
- siblings: function(element) {
- element = $(element);
- return element.previousSiblings().reverse().concat(element.nextSiblings());
- },
-
- match: function(element, selector) {
- if (typeof selector == 'string')
- selector = new Selector(selector);
- return selector.match($(element));
- },
-
- up: function(element, expression, index) {
- return Selector.findElement($(element).ancestors(), expression, index);
- },
-
- down: function(element, expression, index) {
- return Selector.findElement($(element).descendants(), expression, index);
- },
-
- previous: function(element, expression, index) {
- return Selector.findElement($(element).previousSiblings(), expression, index);
- },
-
- next: function(element, expression, index) {
- return Selector.findElement($(element).nextSiblings(), expression, index);
- },
-
- getElementsBySelector: function() {
- var args = $A(arguments), element = $(args.shift());
- return Selector.findChildElements(element, args);
- },
-
- getElementsByClassName: function(element, className) {
- return document.getElementsByClassName(className, element);
- },
-
- readAttribute: function(element, name) {
- element = $(element);
- if (document.all && !window.opera) {
- var t = Element._attributeTranslations;
- if (t.values[name]) return t.values[name](element, name);
- if (t.names[name]) name = t.names[name];
- var attribute = element.attributes[name];
- if(attribute) return attribute.nodeValue;
- }
- return element.getAttribute(name);
- },
-
- getHeight: function(element) {
- return $(element).getDimensions().height;
- },
-
- getWidth: function(element) {
- return $(element).getDimensions().width;
- },
-
- classNames: function(element) {
- return new Element.ClassNames(element);
- },
-
- hasClassName: function(element, className) {
- if (!(element = $(element))) return;
- var elementClassName = element.className;
- if (elementClassName.length == 0) return false;
- if (elementClassName == className ||
- elementClassName.match(new RegExp("(^|\\s)" + className + "(\\s|$)")))
- return true;
- return false;
- },
-
- addClassName: function(element, className) {
- if (!(element = $(element))) return;
- Element.classNames(element).add(className);
- return element;
- },
-
- removeClassName: function(element, className) {
- if (!(element = $(element))) return;
- Element.classNames(element).remove(className);
- return element;
- },
-
- toggleClassName: function(element, className) {
- if (!(element = $(element))) return;
- Element.classNames(element)[element.hasClassName(className) ? 'remove' : 'add'](className);
- return element;
- },
-
- observe: function() {
- Event.observe.apply(Event, arguments);
- return $A(arguments).first();
- },
-
- stopObserving: function() {
- Event.stopObserving.apply(Event, arguments);
- return $A(arguments).first();
- },
-
- // removes whitespace-only text node children
- cleanWhitespace: function(element) {
- element = $(element);
- var node = element.firstChild;
- while (node) {
- var nextNode = node.nextSibling;
- if (node.nodeType == 3 && !/\S/.test(node.nodeValue))
- element.removeChild(node);
- node = nextNode;
- }
- return element;
- },
-
- empty: function(element) {
- return $(element).innerHTML.match(/^\s*$/);
- },
-
- descendantOf: function(element, ancestor) {
- element = $(element), ancestor = $(ancestor);
- while (element = element.parentNode)
- if (element == ancestor) return true;
- return false;
- },
-
- scrollTo: function(element) {
- element = $(element);
- var pos = Position.cumulativeOffset(element);
- window.scrollTo(pos[0], pos[1]);
- return element;
- },
-
- getStyle: function(element, style) {
- element = $(element);
- if (['float','cssFloat'].include(style))
- style = (typeof element.style.styleFloat != 'undefined' ? 'styleFloat' : 'cssFloat');
- style = style.camelize();
- var value = element.style[style];
- if (!value) {
- if (document.defaultView && document.defaultView.getComputedStyle) {
- var css = document.defaultView.getComputedStyle(element, null);
- value = css ? css[style] : null;
- } else if (element.currentStyle) {
- value = element.currentStyle[style];
- }
- }
-
- if((value == 'auto') && ['width','height'].include(style) && (element.getStyle('display') != 'none'))
- value = element['offset'+style.capitalize()] + 'px';
-
- if (window.opera && ['left', 'top', 'right', 'bottom'].include(style))
- if (Element.getStyle(element, 'position') == 'static') value = 'auto';
- if(style == 'opacity') {
- if(value) return parseFloat(value);
- if(value = (element.getStyle('filter') || '').match(/alpha\(opacity=(.*)\)/))
- if(value[1]) return parseFloat(value[1]) / 100;
- return 1.0;
- }
- return value == 'auto' ? null : value;
- },
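-
- // For example, $('box').getStyle('opacity') yields a float in [0, 1]:
- // the CSS value when one is set, the IE alpha() filter value divided
- // by 100, or 1.0 when no opacity is defined (the id is illustrative).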
-
- setStyle: function(element, style) {
- element = $(element);
- for (var name in style) {
- var value = style[name];
- if(name == 'opacity') {
- if (value == 1) {
- value = (/Gecko/.test(navigator.userAgent) &&
- !/Konqueror|Safari|KHTML/.test(navigator.userAgent)) ? 0.999999 : 1.0;
- if(/MSIE/.test(navigator.userAgent) && !window.opera)
- element.style.filter = element.getStyle('filter').replace(/alpha\([^\)]*\)/gi,'');
- } else if(value == '') {
- if(/MSIE/.test(navigator.userAgent) && !window.opera)
- element.style.filter = element.getStyle('filter').replace(/alpha\([^\)]*\)/gi,'');
- } else {
- if(value < 0.00001) value = 0;
- if(/MSIE/.test(navigator.userAgent) && !window.opera)
- element.style.filter = element.getStyle('filter').replace(/alpha\([^\)]*\)/gi,'') +
- 'alpha(opacity='+value*100+')';
- }
- } else if(['float','cssFloat'].include(name)) name = (typeof element.style.styleFloat != 'undefined') ? 'styleFloat' : 'cssFloat';
- element.style[name.camelize()] = value;
- }
- return element;
- },
-
- getDimensions: function(element) {
- element = $(element);
- var display = $(element).getStyle('display');
- if (display != 'none' && display != null) // Safari bug
- return {width: element.offsetWidth, height: element.offsetHeight};
-
- // All *Width and *Height properties give 0 on elements with display: none,
- // so temporarily make the element renderable in order to measure it
- var els = element.style;
- var originalVisibility = els.visibility;
- var originalPosition = els.position;
- var originalDisplay = els.display;
- els.visibility = 'hidden';
- els.position = 'absolute';
- els.display = 'block';
- var originalWidth = element.clientWidth;
- var originalHeight = element.clientHeight;
- els.display = originalDisplay;
- els.position = originalPosition;
- els.visibility = originalVisibility;
- return {width: originalWidth, height: originalHeight};
- },
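-
- // For example, $('popup').getDimensions() reports the rendered size even
- // while the popup is display: none, because the element is measured
- // under visibility: hidden and position: absolute before being restored.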
-
- makePositioned: function(element) {
- element = $(element);
- var pos = Element.getStyle(element, 'position');
- if (pos == 'static' || !pos) {
- element._madePositioned = true;
- element.style.position = 'relative';
- // Opera returns the offset relative to the positioning context when an
- // element is positioned relatively but top and left have not been defined
- if (window.opera) {
- element.style.top = 0;
- element.style.left = 0;
- }
- }
- return element;
- },
-
- undoPositioned: function(element) {
- element = $(element);
- if (element._madePositioned) {
- element._madePositioned = undefined;
- element.style.position =
- element.style.top =
- element.style.left =
- element.style.bottom =
- element.style.right = '';
- }
- return element;
- },
-
- makeClipping: function(element) {
- element = $(element);
- if (element._overflow) return element;
- element._overflow = element.style.overflow || 'auto';
- if ((Element.getStyle(element, 'overflow') || 'visible') != 'hidden')
- element.style.overflow = 'hidden';
- return element;
- },
-
- undoClipping: function(element) {
- element = $(element);
- if (!element._overflow) return element;
- element.style.overflow = element._overflow == 'auto' ? '' : element._overflow;
- element._overflow = null;
- return element;
- }
-};
-
-Object.extend(Element.Methods, {childOf: Element.Methods.descendantOf});
-
-Element._attributeTranslations = {};
-
-Element._attributeTranslations.names = {
- colspan: "colSpan",
- rowspan: "rowSpan",
- valign: "vAlign",
- datetime: "dateTime",
- accesskey: "accessKey",
- tabindex: "tabIndex",
- enctype: "encType",
- maxlength: "maxLength",
- readonly: "readOnly",
- longdesc: "longDesc"
-};
-
-Element._attributeTranslations.values = {
- _getAttr: function(element, attribute) {
- return element.getAttribute(attribute, 2);
- },
-
- _flag: function(element, attribute) {
- return $(element).hasAttribute(attribute) ? attribute : null;
- },
-
- style: function(element) {
- return element.style.cssText.toLowerCase();
- },
-
- title: function(element) {
- var node = element.getAttributeNode('title');
- return node.specified ? node.nodeValue : null;
- }
-};
-
-Object.extend(Element._attributeTranslations.values, {
- href: Element._attributeTranslations.values._getAttr,
- src: Element._attributeTranslations.values._getAttr,
- disabled: Element._attributeTranslations.values._flag,
- checked: Element._attributeTranslations.values._flag,
- readonly: Element._attributeTranslations.values._flag,
- multiple: Element._attributeTranslations.values._flag
-});
-
-Element.Methods.Simulated = {
- hasAttribute: function(element, attribute) {
- var t = Element._attributeTranslations;
- attribute = t.names[attribute] || attribute;
- return $(element).getAttributeNode(attribute).specified;
- }
-};
-
-// IE is missing .innerHTML support for TABLE-related elements
-if (document.all && !window.opera){
- Element.Methods.update = function(element, html) {
- element = $(element);
- html = typeof html == 'undefined' ? '' : html.toString();
- var tagName = element.tagName.toUpperCase();
- if (['THEAD','TBODY','TR','TD'].include(tagName)) {
- var div = document.createElement('div'), depth;
- switch (tagName) {
- case 'THEAD':
- case 'TBODY':
- div.innerHTML = '<table><tbody>' + html.stripScripts() + '</tbody></table>';
- depth = 2;
- break;
- case 'TR':
- div.innerHTML = '<table><tbody><tr>' + html.stripScripts() + '</tr></tbody></table>';
- depth = 3;
- break;
- case 'TD':
- div.innerHTML = '<table><tbody><tr><td>' + html.stripScripts() + '</td></tr></tbody></table>';
- depth = 4;
- }
- $A(element.childNodes).each(function(node){
- element.removeChild(node)
- });
- depth.times(function(){ div = div.firstChild });
-
- $A(div.childNodes).each(
- function(node){ element.appendChild(node) });
- } else {
- element.innerHTML = html.stripScripts();
- }
- setTimeout(function() {html.evalScripts()}, 10);
- return element;
- }
-};
-
-Object.extend(Element, Element.Methods);
-
-var _nativeExtensions = false;
-
-if(/Konqueror|Safari|KHTML/.test(navigator.userAgent))
- ['', 'Form', 'Input', 'TextArea', 'Select'].each(function(tag) {
- var className = 'HTML' + tag + 'Element';
- if(window[className]) return;
- var klass = window[className] = {};
- klass.prototype = document.createElement(tag ? tag.toLowerCase() : 'div').__proto__;
- });
-
-Element.addMethods = function(methods) {
- Object.extend(Element.Methods, methods || {});
-
- function copy(methods, destination, onlyIfAbsent) {
- onlyIfAbsent = onlyIfAbsent || false;
- var cache = Element.extend.cache;
- for (var property in methods) {
- var value = methods[property];
- if (!onlyIfAbsent || !(property in destination))
- destination[property] = cache.findOrStore(value);
- }
- }
-
- if (typeof HTMLElement != 'undefined') {
- copy(Element.Methods, HTMLElement.prototype);
- copy(Element.Methods.Simulated, HTMLElement.prototype, true);
- copy(Form.Methods, HTMLFormElement.prototype);
- [HTMLInputElement, HTMLTextAreaElement, HTMLSelectElement].each(function(klass) {
- copy(Form.Element.Methods, klass.prototype);
- });
- _nativeExtensions = true;
- }
-}
-
-var Toggle = new Object();
-Toggle.display = Element.toggle;
-
-/*--------------------------------------------------------------------------*/
-
-Abstract.Insertion = function(adjacency) {
- this.adjacency = adjacency;
-}
-
-Abstract.Insertion.prototype = {
- initialize: function(element, content) {
- this.element = $(element);
- this.content = content.stripScripts();
-
- if (this.adjacency && this.element.insertAdjacentHTML) {
- try {
- this.element.insertAdjacentHTML(this.adjacency, this.content);
- } catch (e) {
- var tagName = this.element.tagName.toUpperCase();
- if (['TBODY', 'TR'].include(tagName)) {
- this.insertContent(this.contentFromAnonymousTable());
- } else {
- throw e;
- }
- }
- } else {
- this.range = this.element.ownerDocument.createRange();
- if (this.initializeRange) this.initializeRange();
- this.insertContent([this.range.createContextualFragment(this.content)]);
- }
-
- setTimeout(function() {content.evalScripts()}, 10);
- },
-
- contentFromAnonymousTable: function() {
- var div = document.createElement('div');
- div.innerHTML = '<table><tbody>' + this.content + '</tbody></table>';
- return $A(div.childNodes[0].childNodes[0].childNodes);
- }
-}
-
-var Insertion = new Object();
-
-Insertion.Before = Class.create();
-Insertion.Before.prototype = Object.extend(new Abstract.Insertion('beforeBegin'), {
- initializeRange: function() {
- this.range.setStartBefore(this.element);
- },
-
- insertContent: function(fragments) {
- fragments.each((function(fragment) {
- this.element.parentNode.insertBefore(fragment, this.element);
- }).bind(this));
- }
-});
-
-Insertion.Top = Class.create();
-Insertion.Top.prototype = Object.extend(new Abstract.Insertion('afterBegin'), {
- initializeRange: function() {
- this.range.selectNodeContents(this.element);
- this.range.collapse(true);
- },
-
- insertContent: function(fragments) {
- fragments.reverse(false).each((function(fragment) {
- this.element.insertBefore(fragment, this.element.firstChild);
- }).bind(this));
- }
-});
-
-Insertion.Bottom = Class.create();
-Insertion.Bottom.prototype = Object.extend(new Abstract.Insertion('beforeEnd'), {
- initializeRange: function() {
- this.range.selectNodeContents(this.element);
- this.range.collapse(this.element);
- },
-
- insertContent: function(fragments) {
- fragments.each((function(fragment) {
- this.element.appendChild(fragment);
- }).bind(this));
- }
-});
-
-Insertion.After = Class.create();
-Insertion.After.prototype = Object.extend(new Abstract.Insertion('afterEnd'), {
- initializeRange: function() {
- this.range.setStartAfter(this.element);
- },
-
- insertContent: function(fragments) {
- fragments.each((function(fragment) {
- this.element.parentNode.insertBefore(fragment,
- this.element.nextSibling);
- }).bind(this));
- }
-});
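-
-// Typical use (the id is illustrative): new Insertion.Bottom('list',
-// '<li>appended</li>') appends the fragment as the last child of #list,
-// while Insertion.After would insert it as the element's next sibling.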
-
-/*--------------------------------------------------------------------------*/
-
-Element.ClassNames = Class.create();
-Element.ClassNames.prototype = {
- initialize: function(element) {
- this.element = $(element);
- },
-
- _each: function(iterator) {
- this.element.className.split(/\s+/).select(function(name) {
- return name.length > 0;
- })._each(iterator);
- },
-
- set: function(className) {
- this.element.className = className;
- },
-
- add: function(classNameToAdd) {
- if (this.include(classNameToAdd)) return;
- this.set($A(this).concat(classNameToAdd).join(' '));
- },
-
- remove: function(classNameToRemove) {
- if (!this.include(classNameToRemove)) return;
- this.set($A(this).without(classNameToRemove).join(' '));
- },
-
- toString: function() {
- return $A(this).join(' ');
- }
-};
-
-Object.extend(Element.ClassNames.prototype, Enumerable);
-var Selector = Class.create();
-Selector.prototype = {
- initialize: function(expression) {
- this.params = {classNames: []};
- this.expression = expression.toString().strip();
- this.parseExpression();
- this.compileMatcher();
- },
-
- parseExpression: function() {
- function abort(message) { throw 'Parse error in selector: ' + message; }
-
- if (this.expression == '') abort('empty expression');
-
- var params = this.params, expr = this.expression, match, modifier, clause, rest;
- while (match = expr.match(/^(.*)\[([a-z0-9_:-]+?)(?:([~\|!]?=)(?:"([^"]*)"|([^\]\s]*)))?\]$/i)) {
- params.attributes = params.attributes || [];
- params.attributes.push({name: match[2], operator: match[3], value: match[4] || match[5] || ''});
- expr = match[1];
- }
-
- if (expr == '*') return this.params.wildcard = true;
-
- while (match = expr.match(/^([^a-z0-9_-])?([a-z0-9_-]+)(.*)/i)) {
- modifier = match[1], clause = match[2], rest = match[3];
- switch (modifier) {
- case '#': params.id = clause; break;
- case '.': params.classNames.push(clause); break;
- case '':
- case undefined: params.tagName = clause.toUpperCase(); break;
- default: abort(expr.inspect());
- }
- expr = rest;
- }
-
- if (expr.length > 0) abort(expr.inspect());
- },
-
- buildMatchExpression: function() {
- var params = this.params, conditions = [], clause;
-
- if (params.wildcard)
- conditions.push('true');
- if (clause = params.id)
- conditions.push('element.readAttribute("id") == ' + clause.inspect());
- if (clause = params.tagName)
- conditions.push('element.tagName.toUpperCase() == ' + clause.inspect());
- if ((clause = params.classNames).length > 0)
- for (var i = 0, length = clause.length; i < length; i++)
- conditions.push('element.hasClassName(' + clause[i].inspect() + ')');
- if (clause = params.attributes) {
- clause.each(function(attribute) {
- var value = 'element.readAttribute(' + attribute.name.inspect() + ')';
- var splitValueBy = function(delimiter) {
- return value + ' && ' + value + '.split(' + delimiter.inspect() + ')';
- }
-
- switch (attribute.operator) {
- case '=': conditions.push(value + ' == ' + attribute.value.inspect()); break;
- case '~=': conditions.push(splitValueBy(' ') + '.include(' + attribute.value.inspect() + ')'); break;
- case '|=': conditions.push(
- splitValueBy('-') + '.first().toUpperCase() == ' + attribute.value.toUpperCase().inspect()
- ); break;
- case '!=': conditions.push(value + ' != ' + attribute.value.inspect()); break;
- case '':
- case undefined: conditions.push('element.hasAttribute(' + attribute.name.inspect() + ')'); break;
- default: throw 'Unknown operator ' + attribute.operator + ' in selector';
- }
- });
- }
-
- return conditions.join(' && ');
- },
-
- compileMatcher: function() {
- this.match = new Function('element', 'if (!element.tagName) return false; \
- element = $(element); \
- return ' + this.buildMatchExpression());
- },
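-
- // For instance, new Selector('div.panel[title]') compiles a matcher that
- // tests element.tagName, element.hasClassName('panel') and
- // element.hasAttribute('title').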
-
- findElements: function(scope) {
- var element;
-
- if (element = $(this.params.id))
- if (this.match(element))
- if (!scope || Element.childOf(element, scope))
- return [element];
-
- scope = (scope || document).getElementsByTagName(this.params.tagName || '*');
-
- var results = [];
- for (var i = 0, length = scope.length; i < length; i++)
- if (this.match(element = scope[i]))
- results.push(Element.extend(element));
-
- return results;
- },
-
- toString: function() {
- return this.expression;
- }
-}
-
-Object.extend(Selector, {
- matchElements: function(elements, expression) {
- var selector = new Selector(expression);
- return elements.select(selector.match.bind(selector)).map(Element.extend);
- },
-
- findElement: function(elements, expression, index) {
- if (typeof expression == 'number') index = expression, expression = false;
- return Selector.matchElements(elements, expression || '*')[index || 0];
- },
-
- findChildElements: function(element, expressions) {
- return expressions.map(function(expression) {
- return expression.match(/[^\s"]+(?:"[^"]*"[^\s"]+)*/g).inject([null], function(results, expr) {
- var selector = new Selector(expr);
- return results.inject([], function(elements, result) {
- return elements.concat(selector.findElements(result || element));
- });
- });
- }).flatten();
- }
-});
-
-function $$() {
- return Selector.findChildElements(document, $A(arguments));
-}
-var Form = {
- reset: function(form) {
- $(form).reset();
- return form;
- },
-
- serializeElements: function(elements, getHash) {
- var data = elements.inject({}, function(result, element) {
- if (!element.disabled && element.name) {
- var key = element.name, value = $(element).getValue();
- if (value != undefined) {
- if (result[key]) {
- if (result[key].constructor != Array) result[key] = [result[key]];
- result[key].push(value);
- }
- else result[key] = value;
- }
- }
- return result;
- });
-
- return getHash ? data : Hash.toQueryString(data);
- }
-};
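-
-// Usage sketch (field names are illustrative):
-// Form.serializeElements(Form.getElements('login'), false) produces a
-// query string such as 'user=joe&remember=on', while passing true for
-// getHash returns the corresponding {user: 'joe', remember: 'on'} object.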
-
-Form.Methods = {
- serialize: function(form, getHash) {
- return Form.serializeElements(Form.getElements(form), getHash);
- },
-
- getElements: function(form) {
- return $A($(form).getElementsByTagName('*')).inject([],
- function(elements, child) {
- if (Form.Element.Serializers[child.tagName.toLowerCase()])
- elements.push(Element.extend(child));
- return elements;
- }
- );
- },
-
- getInputs: function(form, typeName, name) {
- form = $(form);
- var inputs = form.getElementsByTagName('input');
-
- if (!typeName && !name) return $A(inputs).map(Element.extend);
-
- for (var i = 0, matchingInputs = [], length = inputs.length; i < length; i++) {
- var input = inputs[i];
- if ((typeName && input.type != typeName) || (name && input.name != name))
- continue;
- matchingInputs.push(Element.extend(input));
- }
-
- return matchingInputs;
- },
-
- disable: function(form) {
- form = $(form);
- form.getElements().each(function(element) {
- element.blur();
- element.disabled = 'true';
- });
- return form;
- },
-
- enable: function(form) {
- form = $(form);
- form.getElements().each(function(element) {
- element.disabled = '';
- });
- return form;
- },
-
- findFirstElement: function(form) {
- return $(form).getElements().find(function(element) {
- return element.type != 'hidden' && !element.disabled &&
- ['input', 'select', 'textarea'].include(element.tagName.toLowerCase());
- });
- },
-
- focusFirstElement: function(form) {
- form = $(form);
- form.findFirstElement().activate();
- return form;
- }
-}
-
-Object.extend(Form, Form.Methods);
-
-/*--------------------------------------------------------------------------*/
-
-Form.Element = {
- focus: function(element) {
- $(element).focus();
- return element;
- },
-
- select: function(element) {
- $(element).select();
- return element;
- }
-}
-
-Form.Element.Methods = {
- serialize: function(element) {
- element = $(element);
- if (!element.disabled && element.name) {
- var value = element.getValue();
- if (value != undefined) {
- var pair = {};
- pair[element.name] = value;
- return Hash.toQueryString(pair);
- }
- }
- return '';
- },
-
- getValue: function(element) {
- element = $(element);
- var method = element.tagName.toLowerCase();
- return Form.Element.Serializers[method](element);
- },
-
- clear: function(element) {
- $(element).value = '';
- return element;
- },
-
- present: function(element) {
- return $(element).value != '';
- },
-
- activate: function(element) {
- element = $(element);
- element.focus();
- if (element.select && ( element.tagName.toLowerCase() != 'input' ||
- !['button', 'reset', 'submit'].include(element.type) ) )
- element.select();
- return element;
- },
-
- disable: function(element) {
- element = $(element);
- element.disabled = true;
- return element;
- },
-
- enable: function(element) {
- element = $(element);
- element.blur();
- element.disabled = false;
- return element;
- }
-}
-
-Object.extend(Form.Element, Form.Element.Methods);
-var Field = Form.Element;
-var $F = Form.Element.getValue;
-
-/*--------------------------------------------------------------------------*/
-
-Form.Element.Serializers = {
- input: function(element) {
- switch (element.type.toLowerCase()) {
- case 'checkbox':
- case 'radio':
- return Form.Element.Serializers.inputSelector(element);
- default:
- return Form.Element.Serializers.textarea(element);
- }
- },
-
- inputSelector: function(element) {
- return element.checked ? element.value : null;
- },
-
- textarea: function(element) {
- return element.value;
- },
-
- select: function(element) {
- return this[element.type == 'select-one' ?
- 'selectOne' : 'selectMany'](element);
- },
-
- selectOne: function(element) {
- var index = element.selectedIndex;
- return index >= 0 ? this.optionValue(element.options[index]) : null;
- },
-
- selectMany: function(element) {
- var values, length = element.length;
- if (!length) return null;
-
- for (var i = 0, values = []; i < length; i++) {
- var opt = element.options[i];
- if (opt.selected) values.push(this.optionValue(opt));
- }
- return values;
- },
-
- optionValue: function(opt) {
- // extend element because hasAttribute may not be native
- return Element.extend(opt).hasAttribute('value') ? opt.value : opt.text;
- }
-}
-
-/*--------------------------------------------------------------------------*/
-
-Abstract.TimedObserver = function() {}
-Abstract.TimedObserver.prototype = {
- initialize: function(element, frequency, callback) {
- this.frequency = frequency;
- this.element = $(element);
- this.callback = callback;
-
- this.lastValue = this.getValue();
- this.registerCallback();
- },
-
- registerCallback: function() {
- setInterval(this.onTimerEvent.bind(this), this.frequency * 1000);
- },
-
- onTimerEvent: function() {
- var value = this.getValue();
- var changed = ('string' == typeof this.lastValue && 'string' == typeof value
- ? this.lastValue != value : String(this.lastValue) != String(value));
- if (changed) {
- this.callback(this.element, value);
- this.lastValue = value;
- }
- }
-}
-
-Form.Element.Observer = Class.create();
-Form.Element.Observer.prototype = Object.extend(new Abstract.TimedObserver(), {
- getValue: function() {
- return Form.Element.getValue(this.element);
- }
-});
-
-Form.Observer = Class.create();
-Form.Observer.prototype = Object.extend(new Abstract.TimedObserver(), {
- getValue: function() {
- return Form.serialize(this.element);
- }
-});
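-
-// Usage sketch (the id is illustrative): new Form.Element.Observer('query',
-// 0.5, callback) polls the #query field every 500 ms and invokes
-// callback(element, value) whenever the field's value changes.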
-
-/*--------------------------------------------------------------------------*/
-
-Abstract.EventObserver = function() {}
-Abstract.EventObserver.prototype = {
- initialize: function(element, callback) {
- this.element = $(element);
- this.callback = callback;
-
- this.lastValue = this.getValue();
- if (this.element.tagName.toLowerCase() == 'form')
- this.registerFormCallbacks();
- else
- this.registerCallback(this.element);
- },
-
- onElementEvent: function() {
- var value = this.getValue();
- if (this.lastValue != value) {
- this.callback(this.element, value);
- this.lastValue = value;
- }
- },
-
- registerFormCallbacks: function() {
- Form.getElements(this.element).each(this.registerCallback.bind(this));
- },
-
- registerCallback: function(element) {
- if (element.type) {
- switch (element.type.toLowerCase()) {
- case 'checkbox':
- case 'radio':
- Event.observe(element, 'click', this.onElementEvent.bind(this));
- break;
- default:
- Event.observe(element, 'change', this.onElementEvent.bind(this));
- break;
- }
- }
- }
-}
-
-Form.Element.EventObserver = Class.create();
-Form.Element.EventObserver.prototype = Object.extend(new Abstract.EventObserver(), {
- getValue: function() {
- return Form.Element.getValue(this.element);
- }
-});
-
-Form.EventObserver = Class.create();
-Form.EventObserver.prototype = Object.extend(new Abstract.EventObserver(), {
- getValue: function() {
- return Form.serialize(this.element);
- }
-});
-if (!window.Event) {
- var Event = new Object();
-}
-
-Object.extend(Event, {
- KEY_BACKSPACE: 8,
- KEY_TAB: 9,
- KEY_RETURN: 13,
- KEY_ESC: 27,
- KEY_LEFT: 37,
- KEY_UP: 38,
- KEY_RIGHT: 39,
- KEY_DOWN: 40,
- KEY_DELETE: 46,
- KEY_HOME: 36,
- KEY_END: 35,
- KEY_PAGEUP: 33,
- KEY_PAGEDOWN: 34,
-
- element: function(event) {
- return event.target || event.srcElement;
- },
-
- isLeftClick: function(event) {
- return (((event.which) && (event.which == 1)) ||
- ((event.button) && (event.button == 1)));
- },
-
- pointerX: function(event) {
- return event.pageX || (event.clientX +
- (document.documentElement.scrollLeft || document.body.scrollLeft));
- },
-
- pointerY: function(event) {
- return event.pageY || (event.clientY +
- (document.documentElement.scrollTop || document.body.scrollTop));
- },
-
- stop: function(event) {
- if (event.preventDefault) {
- event.preventDefault();
- event.stopPropagation();
- } else {
- event.returnValue = false;
- event.cancelBubble = true;
- }
- },
-
- // find the first node with the given tagName, starting from the
- // node the event was triggered on; traverses the DOM upwards
- findElement: function(event, tagName) {
- var element = Event.element(event);
- while (element.parentNode && (!element.tagName ||
- (element.tagName.toUpperCase() != tagName.toUpperCase())))
- element = element.parentNode;
- return element;
- },
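-
- // For example, Event.findElement(event, 'tr') returns the closest
- // enclosing table row of the node that was clicked, or the document
- // itself when no such ancestor exists.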
-
- observers: false,
-
- _observeAndCache: function(element, name, observer, useCapture) {
- if (!this.observers) this.observers = [];
- if (element.addEventListener) {
- this.observers.push([element, name, observer, useCapture]);
- element.addEventListener(name, observer, useCapture);
- } else if (element.attachEvent) {
- this.observers.push([element, name, observer, useCapture]);
- element.attachEvent('on' + name, observer);
- }
- },
-
- unloadCache: function() {
- if (!Event.observers) return;
- for (var i = 0, length = Event.observers.length; i < length; i++) {
- Event.stopObserving.apply(this, Event.observers[i]);
- Event.observers[i][0] = null;
- }
- Event.observers = false;
- },
-
- observe: function(element, name, observer, useCapture) {
- element = $(element);
- useCapture = useCapture || false;
-
- if (name == 'keypress' &&
- (navigator.appVersion.match(/Konqueror|Safari|KHTML/)
- || element.attachEvent))
- name = 'keydown';
-
- Event._observeAndCache(element, name, observer, useCapture);
- },
-
- stopObserving: function(element, name, observer, useCapture) {
- element = $(element);
- useCapture = useCapture || false;
-
- if (name == 'keypress' &&
- (navigator.appVersion.match(/Konqueror|Safari|KHTML/)
- || element.detachEvent))
- name = 'keydown';
-
- if (element.removeEventListener) {
- element.removeEventListener(name, observer, useCapture);
- } else if (element.detachEvent) {
- try {
- element.detachEvent('on' + name, observer);
- } catch (e) {}
- }
- }
-});
-
-/* prevent memory leaks in IE */
-if (navigator.appVersion.match(/\bMSIE\b/))
- Event.observe(window, 'unload', Event.unloadCache, false);
-var Position = {
- // set to true if needed; warning: Firefox performance problems.
- // NOT needed for page scrolling, only if a draggable is contained in
- // scrollable elements
- includeScrollOffsets: false,
-
- // must be called before calling withinIncludingScrolloffsets, every time
- // the page is scrolled
- prepare: function() {
- this.deltaX = window.pageXOffset
- || document.documentElement.scrollLeft
- || document.body.scrollLeft
- || 0;
- this.deltaY = window.pageYOffset
- || document.documentElement.scrollTop
- || document.body.scrollTop
- || 0;
- },
-
- realOffset: function(element) {
- var valueT = 0, valueL = 0;
- do {
- valueT += element.scrollTop || 0;
- valueL += element.scrollLeft || 0;
- element = element.parentNode;
- } while (element);
- return [valueL, valueT];
- },
-
- cumulativeOffset: function(element) {
- var valueT = 0, valueL = 0;
- do {
- valueT += element.offsetTop || 0;
- valueL += element.offsetLeft || 0;
- element = element.offsetParent;
- } while (element);
- return [valueL, valueT];
- },
-
- positionedOffset: function(element) {
- var valueT = 0, valueL = 0;
- do {
- valueT += element.offsetTop || 0;
- valueL += element.offsetLeft || 0;
- element = element.offsetParent;
- if (element) {
- if(element.tagName=='BODY') break;
- var p = Element.getStyle(element, 'position');
- if (p == 'relative' || p == 'absolute') break;
- }
- } while (element);
- return [valueL, valueT];
- },
-
- offsetParent: function(element) {
- if (element.offsetParent) return element.offsetParent;
- if (element == document.body) return element;
-
- while ((element = element.parentNode) && element != document.body)
- if (Element.getStyle(element, 'position') != 'static')
- return element;
-
- return document.body;
- },
-
- // caches x/y coordinate pair to use with overlap
- within: function(element, x, y) {
- if (this.includeScrollOffsets)
- return this.withinIncludingScrolloffsets(element, x, y);
- this.xcomp = x;
- this.ycomp = y;
- this.offset = this.cumulativeOffset(element);
-
- return (y >= this.offset[1] &&
- y < this.offset[1] + element.offsetHeight &&
- x >= this.offset[0] &&
- x < this.offset[0] + element.offsetWidth);
- },
-
- withinIncludingScrolloffsets: function(element, x, y) {
- var offsetcache = this.realOffset(element);
-
- this.xcomp = x + offsetcache[0] - this.deltaX;
- this.ycomp = y + offsetcache[1] - this.deltaY;
- this.offset = this.cumulativeOffset(element);
-
- return (this.ycomp >= this.offset[1] &&
- this.ycomp < this.offset[1] + element.offsetHeight &&
- this.xcomp >= this.offset[0] &&
- this.xcomp < this.offset[0] + element.offsetWidth);
- },
-
- // within() must be called directly before calling overlap()
- overlap: function(mode, element) {
- if (!mode) return 0;
- if (mode == 'vertical')
- return ((this.offset[1] + element.offsetHeight) - this.ycomp) /
- element.offsetHeight;
- if (mode == 'horizontal')
- return ((this.offset[0] + element.offsetWidth) - this.xcomp) /
- element.offsetWidth;
- },
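-
- // Example: Position.within(el, Event.pointerX(e), Event.pointerY(e))
- // caches the point, and Position.overlap('vertical', el) then returns
- // 1.0 at the element's top edge, falling toward 0 at its bottom edge.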
-
- page: function(forElement) {
- var valueT = 0, valueL = 0;
-
- var element = forElement;
- do {
- valueT += element.offsetTop || 0;
- valueL += element.offsetLeft || 0;
-
- // Safari fix
- if (element.offsetParent==document.body)
- if (Element.getStyle(element,'position')=='absolute') break;
-
- } while (element = element.offsetParent);
-
- element = forElement;
- do {
- if (!window.opera || element.tagName=='BODY') {
- valueT -= element.scrollTop || 0;
- valueL -= element.scrollLeft || 0;
- }
- } while (element = element.parentNode);
-
- return [valueL, valueT];
- },
-
- clone: function(source, target) {
- var options = Object.extend({
- setLeft: true,
- setTop: true,
- setWidth: true,
- setHeight: true,
- offsetTop: 0,
- offsetLeft: 0
- }, arguments[2] || {})
-
- // find page position of source
- source = $(source);
- var p = Position.page(source);
-
- // find coordinate system to use
- target = $(target);
- var delta = [0, 0];
- var parent = null;
- // delta [0,0] will do fine with position: fixed elements,
- // position:absolute needs offsetParent deltas
- if (Element.getStyle(target,'position') == 'absolute') {
- parent = Position.offsetParent(target);
- delta = Position.page(parent);
- }
-
- // correct by body offsets (fixes Safari)
- if (parent == document.body) {
- delta[0] -= document.body.offsetLeft;
- delta[1] -= document.body.offsetTop;
- }
-
- // set position
- if(options.setLeft) target.style.left = (p[0] - delta[0] + options.offsetLeft) + 'px';
- if(options.setTop) target.style.top = (p[1] - delta[1] + options.offsetTop) + 'px';
- if(options.setWidth) target.style.width = source.offsetWidth + 'px';
- if(options.setHeight) target.style.height = source.offsetHeight + 'px';
- },
-
- absolutize: function(element) {
- element = $(element);
- if (element.style.position == 'absolute') return;
- Position.prepare();
-
- var offsets = Position.positionedOffset(element);
- var top = offsets[1];
- var left = offsets[0];
- var width = element.clientWidth;
- var height = element.clientHeight;
-
- element._originalLeft = left - parseFloat(element.style.left || 0);
- element._originalTop = top - parseFloat(element.style.top || 0);
- element._originalWidth = element.style.width;
- element._originalHeight = element.style.height;
-
- element.style.position = 'absolute';
- element.style.top = top + 'px';
- element.style.left = left + 'px';
- element.style.width = width + 'px';
- element.style.height = height + 'px';
- },
-
- relativize: function(element) {
- element = $(element);
- if (element.style.position == 'relative') return;
- Position.prepare();
-
- element.style.position = 'relative';
- var top = parseFloat(element.style.top || 0) - (element._originalTop || 0);
- var left = parseFloat(element.style.left || 0) - (element._originalLeft || 0);
-
- element.style.top = top + 'px';
- element.style.left = left + 'px';
- element.style.height = element._originalHeight;
- element.style.width = element._originalWidth;
- }
-}
-
-// Safari returns margins on body, which is incorrect if the child is
-// absolutely positioned. For performance reasons, redefine
-// Position.cumulativeOffset for KHTML/WebKit only.
-if (/Konqueror|Safari|KHTML/.test(navigator.userAgent)) {
- Position.cumulativeOffset = function(element) {
- var valueT = 0, valueL = 0;
- do {
- valueT += element.offsetTop || 0;
- valueL += element.offsetLeft || 0;
- if (element.offsetParent == document.body)
- if (Element.getStyle(element, 'position') == 'absolute') break;
-
- element = element.offsetParent;
- } while (element);
-
- return [valueL, valueT];
- }
-}
-
-Element.addMethods();
-
// ------------------------------------------------------------------------
// ------------------------------------------------------------------------
@@ -3431,4 +933,3 @@ function renderScene(){
raytracer.renderScene(scene, null, 0);
}
-
diff --git a/deps/v8/benchmarks/revisions.html b/deps/v8/benchmarks/revisions.html
index bba53dc60..458f8db69 100644
--- a/deps/v8/benchmarks/revisions.html
+++ b/deps/v8/benchmarks/revisions.html
@@ -20,6 +20,25 @@ the benchmark suite.
</p>
+<div class="subtitle"><h3>Version 5 (<a href="http://v8.googlecode.com/svn/data/benchmarks/v5/run.html">link</a>)</h3></div>
+
+<p>Removed a duplicate line in the base random seed code.
+</p>
+
+<div class="subtitle"><h3>Version 4 (<a href="http://v8.googlecode.com/svn/data/benchmarks/v4/run.html">link</a>)</h3></div>
+
+<p>The <i>Splay</i> benchmark is a newcomer in version 4. It
+manipulates a splay tree by adding and removing data nodes, thus
+exercising the memory management subsystem of the JavaScript engine.
+</p>
+
+<p>
+Furthermore, all the unused parts of the Prototype library were
+removed from the RayTrace benchmark. This does not affect the running
+of the benchmark.
+</p>
+
+
<div class="subtitle"><h3>Version 3 (<a href="http://v8.googlecode.com/svn/data/benchmarks/v3/run.html">link</a>)</h3></div>
<p>Version 3 adds a new benchmark, <i>RegExp</i>. The RegExp
@@ -32,9 +51,10 @@ encoded using ROT13 in a way that does not affect how the regexps
match their input.
</p>
+
<div class="subtitle"><h3>Version 2 (<a href="http://v8.googlecode.com/svn/data/benchmarks/v2/run.html">link</a>)</h3></div>
-<p>For version 2 the crypto benchmark was fixed. Previously, the
+<p>For version 2 the Crypto benchmark was fixed. Previously, the
decryption stage was given plaintext as input, which resulted in an
error. Now, the decryption stage is given the output of the
encryption stage as input. The result is checked against the original
@@ -49,6 +69,7 @@ results of their calculations. This is to avoid accidentally
obtaining scores that are the result of an incorrect JavaScript engine
optimization.</p>
+
<div class="subtitle"><h3>Version 1 (<a href="http://v8.googlecode.com/svn/data/benchmarks/v1/run.html">link</a>)</h3></div>
<p>Initial release.</p>
diff --git a/deps/v8/benchmarks/richards.js b/deps/v8/benchmarks/richards.js
index bb88623cd..c9368eff9 100644
--- a/deps/v8/benchmarks/richards.js
+++ b/deps/v8/benchmarks/richards.js
@@ -30,7 +30,7 @@
// benchmark from:
//
// http://www.cl.cam.ac.uk/~mr10/Bench.html
-//
+//
// The benchmark was originally implemented in BCPL by
// Martin Richards.
diff --git a/deps/v8/benchmarks/run.html b/deps/v8/benchmarks/run.html
index 8663cf10e..6adb6d27a 100644
--- a/deps/v8/benchmarks/run.html
+++ b/deps/v8/benchmarks/run.html
@@ -8,6 +8,7 @@
<script type="text/javascript" src="raytrace.js"></script>
<script type="text/javascript" src="earley-boyer.js"></script>
<script type="text/javascript" src="regexp.js"></script>
+<script type="text/javascript" src="splay.js"></script>
<link type="text/css" rel="stylesheet" href="style.css"></link>
<script type="text/javascript">
var completed = 0;
@@ -72,12 +73,13 @@ higher scores means better performance: <em>Bigger is better!</em>
<ul>
<li><b>Richards</b><br/>OS kernel simulation benchmark, originally written in BCPL by Martin Richards (<i>539 lines</i>).</li>
<li><b>DeltaBlue</b><br/>One-way constraint solver, originally written in Smalltalk by John Maloney and Mario Wolczko (<i>880 lines</i>).</li>
-<li><b>Crypto</b><br/>Encryption and decryption benchmark based on code by Tom Wu (<i>1689 lines</i>).</li>
-<li><b>RayTrace</b><br/>Ray tracer benchmark based on code by <a href="http://flog.co.nz/">Adam Burmister</a> (<i>3418 lines</i>).</li>
-<li><b>EarleyBoyer</b><br/>Classic Scheme benchmarks, translated to JavaScript by Florian Loitsch's Scheme2Js compiler (<i>4682 lines</i>).</li>
+<li><b>Crypto</b><br/>Encryption and decryption benchmark based on code by Tom Wu (<i>1698 lines</i>).</li>
+<li><b>RayTrace</b><br/>Ray tracer benchmark based on code by <a href="http://flog.co.nz/">Adam Burmister</a> (<i>935 lines</i>).</li>
+<li><b>EarleyBoyer</b><br/>Classic Scheme benchmarks, translated to JavaScript by Florian Loitsch's Scheme2Js compiler (<i>4685 lines</i>).</li>
<li><b>RegExp</b><br/>Regular expression benchmark generated by extracting regular expression operations from 50 of the most popular web pages
-(<i>4758 lines</i>).
+(<i>1614 lines</i>).
</li>
+<li><b>Splay</b><br/>Data manipulation benchmark that deals with splay trees and exercises the automatic memory management subsystem (<i>378 lines</i>).</li>
</ul>
<p>
@@ -90,7 +92,7 @@ the <a href="http://v8.googlecode.com/svn/data/benchmarks/current/revisions.html
</td><td style="text-align: center">
<div class="run">
- <div id="status" style="text-align: center; margin-top: 60px; font-size: 120%; font-weight: bold;">Starting...</div>
+ <div id="status" style="text-align: center; margin-top: 50px; font-size: 120%; font-weight: bold;">Starting...</div>
<div style="text-align: left; margin: 30px 0 0 90px;" id="results">
<div>
</div>
diff --git a/deps/v8/benchmarks/run.js b/deps/v8/benchmarks/run.js
index bdf1fb1ff..da95fb498 100644
--- a/deps/v8/benchmarks/run.js
+++ b/deps/v8/benchmarks/run.js
@@ -33,6 +33,7 @@ load('crypto.js');
load('raytrace.js');
load('earley-boyer.js');
load('regexp.js');
+load('splay.js');
var success = true;
diff --git a/deps/v8/benchmarks/splay.js b/deps/v8/benchmarks/splay.js
new file mode 100644
index 000000000..53fc72793
--- /dev/null
+++ b/deps/v8/benchmarks/splay.js
@@ -0,0 +1,378 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This benchmark is based on a JavaScript log processing module used
+// by the V8 profiler to generate execution time profiles for runs of
+// JavaScript applications, and it effectively measures how fast the
+// JavaScript engine is at allocating nodes and reclaiming the memory
+// used for old nodes. Because of the way splay trees work, the engine
+// also has to deal with a lot of changes to the large tree object
+// graph.
+
+var Splay = new BenchmarkSuite('Splay', 126125, [
+ new Benchmark("Splay", SplayRun, SplaySetup, SplayTearDown)
+]);
+
+
+// Configuration.
+var kSplayTreeSize = 8000;
+var kSplayTreeModifications = 80;
+var kSplayTreePayloadDepth = 5;
+
+var splayTree = null;
+
+
+function GeneratePayloadTree(depth, key) {
+ if (depth == 0) {
+ return {
+ array : [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ],
+ string : 'String for key ' + key + ' in leaf node'
+ };
+ } else {
+ return {
+ left: GeneratePayloadTree(depth - 1, key),
+ right: GeneratePayloadTree(depth - 1, key)
+ };
+ }
+}
+
+
+function GenerateKey() {
+ // The benchmark framework guarantees that Math.random is
+ // deterministic; see base.js.
+ return Math.random();
+}
+
+
+function InsertNewNode() {
+ // Insert new node with a unique key.
+ var key;
+ do {
+ key = GenerateKey();
+ } while (splayTree.find(key) != null);
+ splayTree.insert(key, GeneratePayloadTree(kSplayTreePayloadDepth, key));
+ return key;
+}
+
+
+
+function SplaySetup() {
+ splayTree = new SplayTree();
+ for (var i = 0; i < kSplayTreeSize; i++) InsertNewNode();
+}
+
+
+function SplayTearDown() {
+ // Allow the garbage collector to reclaim the memory
+ // used by the splay tree no matter how we exit the
+ // tear down function.
+ var keys = splayTree.exportKeys();
+ splayTree = null;
+
+ // Verify that the splay tree has the right size.
+ var length = keys.length;
+ if (length != kSplayTreeSize) {
+ throw new Error("Splay tree has wrong size");
+ }
+
+ // Verify that the splay tree has sorted, unique keys.
+ for (var i = 0; i < length - 1; i++) {
+ if (keys[i] >= keys[i + 1]) {
+ throw new Error("Splay tree not sorted");
+ }
+ }
+}
+
+
+function SplayRun() {
+ // Replace a few nodes in the splay tree.
+ for (var i = 0; i < kSplayTreeModifications; i++) {
+ var key = InsertNewNode();
+ var greatest = splayTree.findGreatestLessThan(key);
+ if (greatest == null) splayTree.remove(key);
+ else splayTree.remove(greatest.key);
+ }
+}
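+
+// Note that each iteration leaves the tree size unchanged: exactly one
+// node is inserted and one is removed, which lets SplayTearDown verify
+// that kSplayTreeSize keys remain at the end of the run.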
+
+
+/**
+ * Constructs a Splay tree. A splay tree is a self-balancing binary
+ * search tree with the additional property that recently accessed
+ * elements are quick to access again. It performs basic operations
+ * such as insertion, look-up and removal in O(log(n)) amortized time.
+ *
+ * @constructor
+ */
+function SplayTree() {
+}
+
+
+/**
+ * Pointer to the root node of the tree.
+ *
+ * @type {SplayTree.Node}
+ * @private
+ */
+SplayTree.prototype.root_ = null;
+
+
+/**
+ * @return {boolean} Whether the tree is empty.
+ */
+SplayTree.prototype.isEmpty = function() {
+ return !this.root_;
+};
+
+
+/**
+ * Inserts a node into the tree with the specified key and value if
+ * the tree does not already contain a node with the specified key. If
+ * the value is inserted, it becomes the root of the tree.
+ *
+ * @param {number} key Key to insert into the tree.
+ * @param {*} value Value to insert into the tree.
+ */
+SplayTree.prototype.insert = function(key, value) {
+ if (this.isEmpty()) {
+ this.root_ = new SplayTree.Node(key, value);
+ return;
+ }
+ // Splay on the key to move the last node on the search path for
+ // the key to the root of the tree.
+ this.splay_(key);
+ if (this.root_.key == key) {
+ return;
+ }
+ var node = new SplayTree.Node(key, value);
+ if (key > this.root_.key) {
+ node.left = this.root_;
+ node.right = this.root_.right;
+ this.root_.right = null;
+ } else {
+ node.right = this.root_;
+ node.left = this.root_.left;
+ this.root_.left = null;
+ }
+ this.root_ = node;
+};
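+
+
+// A minimal usage sketch (illustrative only, not executed by the benchmark):
+//
+//   var tree = new SplayTree();
+//   tree.insert(2, 'two');
+//   tree.insert(1, 'one');  // key 1 becomes the new root
+//   tree.find(1).value;     // 'one' -- find() splays key 1 to the root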
+
+
+/**
+ * Removes a node with the specified key from the tree if the tree
+ * contains a node with this key. The removed node is returned. If the
+ * key is not found, an exception is thrown.
+ *
+ * @param {number} key Key to find and remove from the tree.
+ * @return {SplayTree.Node} The removed node.
+ */
+SplayTree.prototype.remove = function(key) {
+ if (this.isEmpty()) {
+ throw Error('Key not found: ' + key);
+ }
+ this.splay_(key);
+ if (this.root_.key != key) {
+ throw Error('Key not found: ' + key);
+ }
+ var removed = this.root_;
+ if (!this.root_.left) {
+ this.root_ = this.root_.right;
+ } else {
+ var right = this.root_.right;
+ this.root_ = this.root_.left;
+ // Splay to make sure that the new root has an empty right child.
+ this.splay_(key);
+ // Insert the original right child as the right child of the new
+ // root.
+ this.root_.right = right;
+ }
+ return removed;
+};
+
+
+/**
+ * Returns the node having the specified key, or null if the tree
+ * doesn't contain a node with that key.
+ *
+ * @param {number} key Key to find in the tree.
+ * @return {SplayTree.Node} Node having the specified key.
+ */
+SplayTree.prototype.find = function(key) {
+ if (this.isEmpty()) {
+ return null;
+ }
+ this.splay_(key);
+ return this.root_.key == key ? this.root_ : null;
+};
+
+
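+// findGreatestLessThan below relies on findMax; a minimal implementation
+// consistent with that call site:
+/**
+ * @param {SplayTree.Node=} opt_startNode Optional node to start the
+ *     search from; defaults to the root of the tree.
+ * @return {SplayTree.Node} Node having the maximum key value.
+ */
+SplayTree.prototype.findMax = function(opt_startNode) {
+ if (this.isEmpty()) {
+ return null;
+ }
+ var current = opt_startNode || this.root_;
+ while (current.right) {
+ current = current.right;
+ }
+ return current;
+};
+
+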
+/**
+ * @return {SplayTree.Node} Node having the maximum key value that
+ * is less than or equal to the specified key value.
+ */
+SplayTree.prototype.findGreatestLessThan = function(key) {
+ if (this.isEmpty()) {
+ return null;
+ }
+ // Splay on the key to move the node with the given key or the last
+ // node on the search path to the top of the tree.
+ this.splay_(key);
+ // Now the result is either the root node or the greatest node in
+ // the left subtree.
+ if (this.root_.key <= key) {
+ return this.root_;
+ } else if (this.root_.left) {
+ return this.findMax(this.root_.left);
+ } else {
+ return null;
+ }
+};
+
+
+/**
+ * @return {Array<*>} An array containing all the keys of the tree's nodes.
+ */
+SplayTree.prototype.exportKeys = function() {
+ var result = [];
+ if (!this.isEmpty()) {
+ this.root_.traverse_(function(node) { result.push(node.key); });
+ }
+ return result;
+};
+
+
+/**
+ * Perform the splay operation for the given key. Moves the node with
+ * the given key to the top of the tree. If no node has the given
+ * key, the last node on the search path is moved to the top of the
+ * tree. This is the simplified top-down splaying algorithm from:
+ * "Self-adjusting Binary Search Trees" by Sleator and Tarjan
+ *
+ * @param {number} key Key to splay the tree on.
+ * @private
+ */
+SplayTree.prototype.splay_ = function(key) {
+ if (this.isEmpty()) {
+ return;
+ }
+ // Create a dummy node. The use of the dummy node is a bit
+ // counter-intuitive: The right child of the dummy node will hold
+ // the L tree of the algorithm. The left child of the dummy node
+ // will hold the R tree of the algorithm. Using a dummy node, left
+ // and right will always be nodes and we avoid special cases.
+ var dummy, left, right;
+ dummy = left = right = new SplayTree.Node(null, null);
+ var current = this.root_;
+ while (true) {
+ if (key < current.key) {
+ if (!current.left) {
+ break;
+ }
+ if (key < current.left.key) {
+ // Rotate right.
+ var tmp = current.left;
+ current.left = tmp.right;
+ tmp.right = current;
+ current = tmp;
+ if (!current.left) {
+ break;
+ }
+ }
+ // Link right.
+ right.left = current;
+ right = current;
+ current = current.left;
+ } else if (key > current.key) {
+ if (!current.right) {
+ break;
+ }
+ if (key > current.right.key) {
+ // Rotate left.
+ var tmp = current.right;
+ current.right = tmp.left;
+ tmp.left = current;
+ current = tmp;
+ if (!current.right) {
+ break;
+ }
+ }
+ // Link left.
+ left.right = current;
+ left = current;
+ current = current.right;
+ } else {
+ break;
+ }
+ }
+ // Assemble.
+ left.right = current.left;
+ right.left = current.right;
+ current.left = dummy.right;
+ current.right = dummy.left;
+ this.root_ = current;
+};
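+
+
+// Illustrative trace: splaying for key 3 in the right-leaning chain
+// 1 -> 2 -> 3 first rotates left at node 2 and then links left, leaving
+// node 3 at the root with nodes 1 and 2 in its left subtree.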
+
+
+/**
+ * Constructs a Splay tree node.
+ *
+ * @param {number} key Key.
+ * @param {*} value Value.
+ */
+SplayTree.Node = function(key, value) {
+ this.key = key;
+ this.value = value;
+};
+
+
+/**
+ * @type {SplayTree.Node}
+ */
+SplayTree.Node.prototype.left = null;
+
+
+/**
+ * @type {SplayTree.Node}
+ */
+SplayTree.Node.prototype.right = null;
+
+
+/**
+ * Performs an ordered traversal of the subtree starting at
+ * this SplayTree.Node.
+ *
+ * @param {function(SplayTree.Node)} f Visitor function.
+ * @private
+ */
+SplayTree.Node.prototype.traverse_ = function(f) {
+ var current = this;
+ while (current) {
+ var left = current.left;
+ if (left) left.traverse_(f);
+ f(current);
+ current = current.right;
+ }
+};
diff --git a/deps/v8/include/v8-debug.h b/deps/v8/include/v8-debug.h
index 3adc0a0d5..1a4840e8d 100644
--- a/deps/v8/include/v8-debug.h
+++ b/deps/v8/include/v8-debug.h
@@ -75,7 +75,8 @@ enum DebugEvent {
Exception = 2,
NewFunction = 3,
BeforeCompile = 4,
- AfterCompile = 5
+ AfterCompile = 5,
+ ScriptCollected = 6
};
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 9f59e4e6b..87ce2a20c 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -41,6 +41,10 @@
#include <stdio.h>
#ifdef _WIN32
+// When compiling on MinGW, stdint.h is available.
+#ifdef __MINGW32__
+#include <stdint.h>
+#else // __MINGW32__
typedef signed char int8_t;
typedef unsigned char uint8_t;
typedef short int16_t; // NOLINT
@@ -49,7 +53,8 @@ typedef int int32_t;
typedef unsigned int uint32_t;
typedef __int64 int64_t;
typedef unsigned __int64 uint64_t;
-// intptr_t is defined in crtdefs.h through stdio.h.
+// intptr_t and friends are defined in crtdefs.h through stdio.h.
+#endif // __MINGW32__
// Setup for Windows DLL export/import. When building the V8 DLL the
// BUILDING_V8_SHARED needs to be defined. When building a program which uses
@@ -1051,7 +1056,7 @@ class V8EXPORT Object : public Value {
Handle<Value> value,
PropertyAttribute attribs = None);
- // Sets a local property on this object, bypassing interceptors and
+ // Sets a local property on this object bypassing interceptors and
// overriding accessors or read-only properties.
//
// Note that if the object has an interceptor the property will be set
@@ -1062,13 +1067,21 @@ class V8EXPORT Object : public Value {
bool ForceSet(Handle<Value> key,
Handle<Value> value,
PropertyAttribute attribs = None);
+
Local<Value> Get(Handle<Value> key);
// TODO(1245389): Replace the type-specific versions of these
// functions with generic ones that accept a Handle<Value> key.
bool Has(Handle<String> key);
+
bool Delete(Handle<String> key);
+
+ // Delete a property on this object bypassing interceptors and
+ // ignoring dont-delete attributes.
+ bool ForceDelete(Handle<Value> key);
+
bool Has(uint32_t index);
+
bool Delete(uint32_t index);
/**
@@ -2080,6 +2093,11 @@ class V8EXPORT V8 {
static void ResumeProfiler();
/**
+ * Return whether the profiler is currently paused.
+ */
+ static bool IsProfilerPaused();
+
+ /**
* If logging is performed into a memory buffer (via --logfile=*), allows
* retrieval of previously written messages. This can be used for retrieving
* profiler log data in the application. This function is thread-safe.
@@ -2246,6 +2264,13 @@ class V8EXPORT Context {
static Local<Context> GetCurrent();
/**
+ * Returns the context of the calling JavaScript code. That is, the
+ * context of the top-most JavaScript frame. If there are no
+ * JavaScript frames, an empty handle is returned.
+ */
+ static Local<Context> GetCalling();
+
+ /**
* Sets the security token for the context. To access an object in
* another context, the security tokens must match.
*/
diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript
index 59c452b7c..64d20631c 100755
--- a/deps/v8/src/SConscript
+++ b/deps/v8/src/SConscript
@@ -43,14 +43,14 @@ SOURCES = {
'flags.cc', 'frames.cc', 'func-name-inferrer.cc',
'global-handles.cc', 'handles.cc', 'hashmap.cc',
'heap.cc', 'ic.cc', 'interpreter-irregexp.cc', 'jsregexp.cc',
- 'jump-target.cc', 'log.cc', 'mark-compact.cc', 'messages.cc', 'objects.cc',
- 'oprofile-agent.cc', 'parser.cc', 'property.cc', 'regexp-macro-assembler.cc',
- 'regexp-macro-assembler-irregexp.cc', 'regexp-stack.cc',
- 'register-allocator.cc', 'rewriter.cc', 'runtime.cc', 'scanner.cc',
- 'scopeinfo.cc', 'scopes.cc', 'serialize.cc', 'snapshot-common.cc',
- 'spaces.cc', 'string-stream.cc', 'stub-cache.cc', 'token.cc', 'top.cc',
- 'unicode.cc', 'usage-analyzer.cc', 'utils.cc', 'v8-counters.cc',
- 'v8.cc', 'v8threads.cc', 'variables.cc', 'version.cc',
+ 'jump-target.cc', 'log.cc', 'log-utils.cc', 'mark-compact.cc', 'messages.cc',
+ 'objects.cc', 'oprofile-agent.cc', 'parser.cc', 'property.cc',
+ 'regexp-macro-assembler.cc', 'regexp-macro-assembler-irregexp.cc',
+ 'regexp-stack.cc', 'register-allocator.cc', 'rewriter.cc', 'runtime.cc',
+ 'scanner.cc', 'scopeinfo.cc', 'scopes.cc', 'serialize.cc',
+ 'snapshot-common.cc', 'spaces.cc', 'string-stream.cc', 'stub-cache.cc',
+ 'token.cc', 'top.cc', 'unicode.cc', 'usage-analyzer.cc', 'utils.cc',
+ 'v8-counters.cc', 'v8.cc', 'v8threads.cc', 'variables.cc', 'version.cc',
'virtual-frame.cc', 'zone.cc'
],
'arch:arm': [
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index 4cd93be87..ac6cdf95a 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -34,7 +34,8 @@
#include "top.h"
#include "zone-inl.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
template <class C>
@@ -288,6 +289,24 @@ const AccessorDescriptor Accessors::ScriptType = {
//
+// Accessors::ScriptCompilationType
+//
+
+
+Object* Accessors::ScriptGetCompilationType(Object* object, void*) {
+ Object* script = JSValue::cast(object)->value();
+ return Script::cast(script)->compilation_type();
+}
+
+
+const AccessorDescriptor Accessors::ScriptCompilationType = {
+ ScriptGetCompilationType,
+ IllegalSetter,
+ 0
+};
+
+
+//
// Accessors::ScriptGetLineEnds
//
@@ -313,9 +332,8 @@ const AccessorDescriptor Accessors::ScriptLineEnds = {
Object* Accessors::ScriptGetContextData(Object* object, void*) {
- HandleScope scope;
- Handle<Script> script(Script::cast(JSValue::cast(object)->value()));
- return script->context_data();
+ Object* script = JSValue::cast(object)->value();
+ return Script::cast(script)->context_data();
}
@@ -327,6 +345,54 @@ const AccessorDescriptor Accessors::ScriptContextData = {
//
+// Accessors::ScriptGetEvalFromFunction
+//
+
+
+Object* Accessors::ScriptGetEvalFromFunction(Object* object, void*) {
+ Object* script = JSValue::cast(object)->value();
+ return Script::cast(script)->eval_from_function();
+}
+
+
+const AccessorDescriptor Accessors::ScriptEvalFromFunction = {
+ ScriptGetEvalFromFunction,
+ IllegalSetter,
+ 0
+};
+
+
+//
+// Accessors::ScriptGetEvalFromPosition
+//
+
+
+Object* Accessors::ScriptGetEvalFromPosition(Object* object, void*) {
+ HandleScope scope;
+ Handle<Script> script(Script::cast(JSValue::cast(object)->value()));
+
+ // If this is not a script compiled through eval, there is no eval position.
+ int compilation_type = Smi::cast(script->compilation_type())->value();
+ if (compilation_type != Script::COMPILATION_TYPE_EVAL) {
+ return Heap::undefined_value();
+ }
+
+ // Get the function from where eval was called and find the source position
+ // from the instruction offset.
+ Handle<Code> code(JSFunction::cast(script->eval_from_function())->code());
+ return Smi::FromInt(code->SourcePosition(code->instruction_start() +
+ script->eval_from_instructions_offset()->value()));
+}
+
+
+const AccessorDescriptor Accessors::ScriptEvalFromPosition = {
+ ScriptGetEvalFromPosition,
+ IllegalSetter,
+ 0
+};
+
+
+//
// Accessors::FunctionPrototype
//
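All of the new Script accessors above share one descriptor shape; purely as a pattern illustration, a hypothetical further accessor would look like this (`ScriptGetFoo`, `ScriptFoo`, and the `foo()` field are invented names, not part of this change):

    // Hypothetical accessor following the pattern above.
    Object* Accessors::ScriptGetFoo(Object* object, void*) {
      Object* script = JSValue::cast(object)->value();
      return Script::cast(script)->foo();  // raw field read, no handles needed
    }

    const AccessorDescriptor Accessors::ScriptFoo = {
      ScriptGetFoo,
      IllegalSetter,
      0
    };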
diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h
index 1dd8fdd2f..51d322ec8 100644
--- a/deps/v8/src/accessors.h
+++ b/deps/v8/src/accessors.h
@@ -28,27 +28,31 @@
#ifndef V8_ACCESSORS_H_
#define V8_ACCESSORS_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// The list of accessor descriptors. This is a second-order macro
// taking a macro to be applied to all accessor descriptor names.
#define ACCESSOR_DESCRIPTOR_LIST(V) \
- V(FunctionPrototype) \
- V(FunctionLength) \
- V(FunctionName) \
- V(FunctionArguments) \
- V(FunctionCaller) \
- V(ArrayLength) \
- V(StringLength) \
- V(ScriptSource) \
- V(ScriptName) \
- V(ScriptId) \
- V(ScriptLineOffset) \
- V(ScriptColumnOffset) \
- V(ScriptData) \
- V(ScriptType) \
- V(ScriptLineEnds) \
- V(ScriptContextData) \
+ V(FunctionPrototype) \
+ V(FunctionLength) \
+ V(FunctionName) \
+ V(FunctionArguments) \
+ V(FunctionCaller) \
+ V(ArrayLength) \
+ V(StringLength) \
+ V(ScriptSource) \
+ V(ScriptName) \
+ V(ScriptId) \
+ V(ScriptLineOffset) \
+ V(ScriptColumnOffset) \
+ V(ScriptData) \
+ V(ScriptType) \
+ V(ScriptCompilationType) \
+ V(ScriptLineEnds) \
+ V(ScriptContextData) \
+ V(ScriptEvalFromFunction) \
+ V(ScriptEvalFromPosition) \
V(ObjectPrototype)
// Accessors contains all predefined proxy accessors.
@@ -88,8 +92,11 @@ class Accessors : public AllStatic {
static Object* ScriptGetColumnOffset(Object* object, void*);
static Object* ScriptGetData(Object* object, void*);
static Object* ScriptGetType(Object* object, void*);
+ static Object* ScriptGetCompilationType(Object* object, void*);
static Object* ScriptGetLineEnds(Object* object, void*);
static Object* ScriptGetContextData(Object* object, void*);
+ static Object* ScriptGetEvalFromFunction(Object* object, void*);
+ static Object* ScriptGetEvalFromPosition(Object* object, void*);
static Object* ObjectGetPrototype(Object* receiver, void*);
static Object* ObjectSetPrototype(JSObject* receiver, Object* value, void*);
diff --git a/deps/v8/src/allocation.cc b/deps/v8/src/allocation.cc
index 3d26123bf..41724b68c 100644
--- a/deps/v8/src/allocation.cc
+++ b/deps/v8/src/allocation.cc
@@ -29,7 +29,8 @@
#include "v8.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
void* Malloced::New(size_t size) {
diff --git a/deps/v8/src/allocation.h b/deps/v8/src/allocation.h
index a690f0835..586c4fd0d 100644
--- a/deps/v8/src/allocation.h
+++ b/deps/v8/src/allocation.h
@@ -28,7 +28,8 @@
#ifndef V8_ALLOCATION_H_
#define V8_ALLOCATION_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// A class that controls whether allocation is allowed. This is for
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index c250412e4..7b7f29081 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -99,7 +99,6 @@ static i::HandleScopeImplementer thread_local;
// --- E x c e p t i o n B e h a v i o r ---
-static bool has_shut_down = false;
static FatalErrorCallback exception_behavior = NULL;
@@ -123,7 +122,7 @@ static FatalErrorCallback& GetFatalErrorHandler() {
// When V8 cannot allocate memory, FatalProcessOutOfMemory is called.
// The default fatal error handler is called and execution is stopped.
void i::V8::FatalProcessOutOfMemory(const char* location) {
- has_shut_down = true;
+ i::V8::SetFatalError();
FatalErrorCallback callback = GetFatalErrorHandler();
{
LEAVE_V8;
@@ -142,13 +141,13 @@ void V8::SetFatalErrorHandler(FatalErrorCallback that) {
bool Utils::ReportApiFailure(const char* location, const char* message) {
FatalErrorCallback callback = GetFatalErrorHandler();
callback(location, message);
- has_shut_down = true;
+ i::V8::SetFatalError();
return false;
}
bool V8::IsDead() {
- return has_shut_down;
+ return i::V8::IsDead();
}
@@ -186,7 +185,8 @@ static bool ReportEmptyHandle(const char* location) {
* yet been done.
*/
static inline bool IsDeadCheck(const char* location) {
- return has_shut_down ? ReportV8Dead(location) : false;
+ return !i::V8::IsRunning()
+ && i::V8::IsDead() ? ReportV8Dead(location) : false;
}
@@ -205,9 +205,14 @@ static inline bool EmptyCheck(const char* location, const v8::Data* obj) {
static i::StringInputBuffer write_input_buffer;
-static void EnsureInitialized(const char* location) {
- if (IsDeadCheck(location)) return;
- ApiCheck(v8::V8::Initialize(), location, "Error initializing V8");
+static inline bool EnsureInitialized(const char* location) {
+ if (i::V8::IsRunning()) {
+ return true;
+ }
+ if (IsDeadCheck(location)) {
+ return false;
+ }
+ return ApiCheck(v8::V8::Initialize(), location, "Error initializing V8");
}
@@ -225,29 +230,25 @@ void ImplementationUtilities::ZapHandleRange(void** begin, void** end) {
v8::Handle<v8::Primitive> ImplementationUtilities::Undefined() {
- if (IsDeadCheck("v8::Undefined()")) return v8::Handle<v8::Primitive>();
- EnsureInitialized("v8::Undefined()");
+ if (!EnsureInitialized("v8::Undefined()")) return v8::Handle<v8::Primitive>();
return v8::Handle<Primitive>(ToApi<Primitive>(i::Factory::undefined_value()));
}
v8::Handle<v8::Primitive> ImplementationUtilities::Null() {
- if (IsDeadCheck("v8::Null()")) return v8::Handle<v8::Primitive>();
- EnsureInitialized("v8::Null()");
+ if (!EnsureInitialized("v8::Null()")) return v8::Handle<v8::Primitive>();
return v8::Handle<Primitive>(ToApi<Primitive>(i::Factory::null_value()));
}
v8::Handle<v8::Boolean> ImplementationUtilities::True() {
- if (IsDeadCheck("v8::True()")) return v8::Handle<v8::Boolean>();
- EnsureInitialized("v8::True()");
+ if (!EnsureInitialized("v8::True()")) return v8::Handle<v8::Boolean>();
return v8::Handle<v8::Boolean>(ToApi<Boolean>(i::Factory::true_value()));
}
v8::Handle<v8::Boolean> ImplementationUtilities::False() {
- if (IsDeadCheck("v8::False()")) return v8::Handle<v8::Boolean>();
- EnsureInitialized("v8::False()");
+ if (!EnsureInitialized("v8::False()")) return v8::Handle<v8::Boolean>();
return v8::Handle<v8::Boolean>(ToApi<Boolean>(i::Factory::false_value()));
}
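Since EnsureInitialized now reports success, each lazily-initialized entry point collapses to a single guard line; a pattern sketch using only constructs shown above, with a hypothetical entry point name:

    // Pattern sketch only; `SafeUndefined` is an invented example.
    v8::Handle<v8::Primitive> SafeUndefined() {
      if (!EnsureInitialized("v8::SafeUndefined()")) {
        return v8::Handle<v8::Primitive>();  // empty handle on dead/failed init
      }
      return v8::Handle<Primitive>(ToApi<Primitive>(i::Factory::undefined_value()));
    }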
@@ -373,21 +374,21 @@ void V8::ClearWeak(void** obj) {
bool V8::IsGlobalNearDeath(void** obj) {
LOG_API("IsGlobalNearDeath");
- if (has_shut_down) return false;
+ if (!i::V8::IsRunning()) return false;
return i::GlobalHandles::IsNearDeath(reinterpret_cast<i::Object**>(obj));
}
bool V8::IsGlobalWeak(void** obj) {
LOG_API("IsGlobalWeak");
- if (has_shut_down) return false;
+ if (!i::V8::IsRunning()) return false;
return i::GlobalHandles::IsWeak(reinterpret_cast<i::Object**>(obj));
}
void V8::DisposeGlobal(void** obj) {
LOG_API("DisposeGlobal");
- if (has_shut_down) return;
+ if (!i::V8::IsRunning()) return;
i::Object** ptr = reinterpret_cast<i::Object**>(obj);
if ((*ptr)->IsGlobalContext()) i::Heap::NotifyContextDisposed();
i::GlobalHandles::Destroy(ptr);
@@ -415,7 +416,8 @@ int HandleScope::NumberOfHandles() {
void** v8::HandleScope::CreateHandle(void* value) {
- return i::HandleScope::CreateHandle(value);
+ return reinterpret_cast<void**>(
+ i::HandleScope::CreateHandle(reinterpret_cast<i::Object*>(value)));
}
@@ -431,7 +433,7 @@ void Context::Enter() {
void Context::Exit() {
- if (has_shut_down) return;
+ if (!i::V8::IsRunning()) return;
if (!ApiCheck(thread_local.LeaveLastContext(),
"v8::Context::Exit()",
"Cannot exit non-entered context")) {
@@ -1890,6 +1892,19 @@ bool v8::Object::ForceSet(v8::Handle<Value> key,
}
+bool v8::Object::ForceDelete(v8::Handle<Value> key) {
+ ON_BAILOUT("v8::Object::ForceDelete()", return false);
+ ENTER_V8;
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
+ EXCEPTION_PREAMBLE();
+ i::Handle<i::Object> obj = i::ForceDeleteProperty(self, key_obj);
+ has_pending_exception = obj.is_null();
+ EXCEPTION_BAILOUT_CHECK(false);
+ return obj->IsTrue();
+}
+
+
Local<Value> v8::Object::Get(v8::Handle<Value> key) {
ON_BAILOUT("v8::Object::Get()", return Local<v8::Value>());
ENTER_V8;
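A hedged usage sketch for the new ForceDelete, assuming an entered context and a handle scope; the property name and value are illustrative:

    // Illustrative only: ForceDelete removes even DontDelete properties.
    v8::HandleScope scope;
    v8::Local<v8::Object> obj = v8::Object::New();
    obj->ForceSet(v8::String::New("k"), v8::Integer::New(1), v8::DontDelete);
    bool deleted = obj->ForceDelete(v8::String::New("k"));  // expected: true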
@@ -2450,7 +2465,7 @@ void v8::Object::SetInternalField(int index, v8::Handle<Value> value) {
// --- E n v i r o n m e n t ---
bool v8::V8::Initialize() {
- if (i::V8::HasBeenSetup()) return true;
+ if (i::V8::IsRunning()) return true;
ENTER_V8;
HandleScope scope;
if (i::Snapshot::Initialize()) {
@@ -2612,6 +2627,13 @@ v8::Local<v8::Context> Context::GetCurrent() {
}
+v8::Local<v8::Context> Context::GetCalling() {
+ if (IsDeadCheck("v8::Context::GetCalling()")) return Local<Context>();
+ i::Handle<i::Context> context(i::Top::GetCallingGlobalContext());
+ return Utils::ToLocal(context);
+}
+
+
v8::Local<v8::Object> Context::Global() {
if (IsDeadCheck("v8::Context::Global()")) return Local<v8::Object>();
i::Object** ctx = reinterpret_cast<i::Object**>(this);
@@ -3116,16 +3138,28 @@ void V8::PauseProfiler() {
#endif
}
+
void V8::ResumeProfiler() {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Logger::ResumeProfiler();
#endif
}
+
+bool V8::IsProfilerPaused() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ return i::Logger::IsProfilerPaused();
+#else
+ return true;
+#endif
+}
+
+
int V8::GetLogLines(int from_pos, char* dest_buf, int max_size) {
#ifdef ENABLE_LOGGING_AND_PROFILING
return i::Logger::GetLogLines(from_pos, dest_buf, max_size);
#endif
+ return 0;
}
String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj) {
@@ -3312,7 +3346,7 @@ bool Debug::SetDebugEventListener(v8::Handle<v8::Object> that,
void Debug::DebugBreak() {
- if (!i::V8::HasBeenSetup()) return;
+ if (!i::V8::IsRunning()) return;
i::StackGuard::DebugBreak();
}
@@ -3354,7 +3388,7 @@ void Debug::SetMessageHandler2(v8::Debug::MessageHandler2 handler) {
void Debug::SendCommand(const uint16_t* command, int length,
ClientData* client_data) {
- if (!i::V8::HasBeenSetup()) return;
+ if (!i::V8::IsRunning()) return;
i::Debugger::ProcessCommand(i::Vector<const uint16_t>(command, length),
client_data);
}
@@ -3370,7 +3404,7 @@ void Debug::SetHostDispatchHandler(HostDispatchHandler handler,
Handle<Value> Debug::Call(v8::Handle<v8::Function> fun,
v8::Handle<v8::Value> data) {
- if (!i::V8::HasBeenSetup()) return Handle<Value>();
+ if (!i::V8::IsRunning()) return Handle<Value>();
ON_BAILOUT("v8::Debug::Call()", return Handle<Value>());
ENTER_V8;
i::Handle<i::Object> result;
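Taken together, the api.cc changes replace the single has_shut_down flag with the i::V8::IsRunning()/IsDead() pair; a self-contained illustrative model of the three states the guards distinguish (the enum and helpers are inventions for exposition, not the real implementation):

    // Illustrative model only; the real flags live on i::V8.
    enum EngineState { NOT_YET_RUNNING, RUNNING, DEAD };

    static bool TryInitialize() { return true; }  // stand-in for V8::Initialize()

    static bool GuardEntryPoint(EngineState state) {
      if (state == RUNNING) return true;   // fast path: i::V8::IsRunning()
      if (state == DEAD) return false;     // fatal error seen: i::V8::IsDead()
      return TryInitialize();              // first use: initialize lazily
    }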
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h
index 2ec68ed20..80f90063b 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/arguments.h
@@ -28,7 +28,8 @@
#ifndef V8_ARGUMENTS_H_
#define V8_ARGUMENTS_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Arguments provides access to runtime call parameters.
//
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index fe64761e3..824a5fda5 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -41,7 +41,8 @@
#include "cpu.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
Condition NegateCondition(Condition cc) {
ASSERT(cc != al);
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 191c865a8..6ec8f460b 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -39,7 +39,8 @@
#include "arm/assembler-arm-inl.h"
#include "serialize.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// -----------------------------------------------------------------------------
// Implementation of Register and CRegister
@@ -211,6 +212,7 @@ enum {
// Instruction bit masks
RdMask = 15 << 12, // in str instruction
CondMask = 15 << 28,
+ CoprocessorMask = 15 << 8,
OpCodeMask = 15 << 21, // in data-processing instructions
Imm24Mask = (1 << 24) - 1,
Off12Mask = (1 << 12) - 1,
@@ -616,7 +618,8 @@ void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
// unindexed addressing is not encoded by this function
- ASSERT((instr & ~(CondMask | P | U | N | W | L)) == (B27 | B26));
+ ASSERT_EQ((B27 | B26),
+ (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
int am = x.am_;
int offset_8 = x.offset_;
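The widened assert also strips the coprocessor field (bits 8..11) before comparing against the fixed B27|B26 pattern; a self-contained restatement of that check, with the addressing-mode bits (P|U|N|W|L) folded into one parameter:

    #include <cstdint>

    const uint32_t kCondMask = 15u << 28;        // bits 28..31: condition
    const uint32_t kCoprocessorMask = 15u << 8;  // bits 8..11: coprocessor number
    const uint32_t kB26 = 1u << 26;
    const uint32_t kB27 = 1u << 27;

    bool LooksLikeCoprocessorLoadStore(uint32_t instr, uint32_t mode_bits) {
      // Strip condition, coprocessor and mode bits; only the fixed
      // B27|B26 pattern of a coprocessor load/store may remain.
      return (instr & ~(kCondMask | kCoprocessorMask | mode_bits)) == (kB27 | kB26);
    }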
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index d7535e0da..eeab4a72c 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -42,7 +42,8 @@
#include "assembler.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// CPU Registers.
//
@@ -83,8 +84,6 @@ struct Register {
};
-const int kNumRegisters = 16;
-
extern Register no_reg;
extern Register r0;
extern Register r1;
@@ -622,8 +621,8 @@ class Assembler : public Malloced {
// Pseudo instructions
void nop() { mov(r0, Operand(r0)); }
- void push(Register src) {
- str(src, MemOperand(sp, 4, NegPreIndex), al);
+ void push(Register src, Condition cond = al) {
+ str(src, MemOperand(sp, 4, NegPreIndex), cond);
}
void pop(Register dst) {
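With the new default condition argument, push can be predicated like any other ARM instruction; a code-generator fragment under the sources' ACCESS_MASM convention, assuming a live MacroAssembler* named masm:

    // Fragment only: push r1 when the preceding compare set "eq".
    #define __ ACCESS_MASM(masm)
    __ cmp(r0, Operand(0));
    __ push(r1, eq);  // str r1, [sp, #-4]!  executed only on equality
    #undef __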
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 9c7a42ab1..588798bde 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -31,7 +31,8 @@
#include "debug.h"
#include "runtime.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
#define __ ACCESS_MASM(masm)
@@ -187,7 +188,7 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// Set expected number of arguments to zero (not changing r0).
__ mov(r2, Operand(0));
- __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
+ __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
RelocInfo::CODE_TARGET);
}
diff --git a/deps/v8/src/arm/codegen-arm-inl.h b/deps/v8/src/arm/codegen-arm-inl.h
new file mode 100644
index 000000000..544331a52
--- /dev/null
+++ b/deps/v8/src/arm/codegen-arm-inl.h
@@ -0,0 +1,46 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_ARM_CODEGEN_ARM_INL_H_
+#define V8_ARM_CODEGEN_ARM_INL_H_
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+// Platform-specific inline functions.
+
+void DeferredCode::Jump() { __ jmp(&entry_label_); }
+void DeferredCode::Branch(Condition cc) { __ b(cc, &entry_label_); }
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_ARM_CODEGEN_ARM_INL_H_
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 1930a7c2f..7428d3b59 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -36,10 +36,39 @@
#include "scopes.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
#define __ ACCESS_MASM(masm_)
+// -------------------------------------------------------------------------
+// Platform-specific DeferredCode functions.
+
+void DeferredCode::SaveRegisters() {
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ int action = registers_[i];
+ if (action == kPush) {
+ __ push(RegisterAllocator::ToRegister(i));
+ } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
+ __ str(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
+ }
+ }
+}
+
+
+void DeferredCode::RestoreRegisters() {
+ // Restore registers in reverse order due to the stack.
+ for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
+ int action = registers_[i];
+ if (action == kPush) {
+ __ pop(RegisterAllocator::ToRegister(i));
+ } else if (action != kIgnore) {
+ action &= ~kSyncedFlag;
+ __ ldr(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
+ }
+ }
+}
+
// -------------------------------------------------------------------------
// CodeGenState implementation.
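SaveRegisters()/RestoreRegisters() treat each registers_[i] entry as a small action code; the decoding below is an assumption-labeled sketch inferred from the usage above, with invented sentinel values rather than the real DeferredCode definitions:

    const int kIgnore = -1;           // assumed sentinel: register not live
    const int kPush = -2;             // assumed sentinel: save/restore via stack
    const int kSyncedFlag = 1 << 30;  // assumed flag: frame slot already synced

    const char* DescribeSaveAction(int action) {
      if (action == kIgnore) return "not live: nothing to save";
      if (action == kPush) return "live, no slot: push before, pop after";
      if ((action & kSyncedFlag) != 0) return "synced: reload from fp slot only";
      return "frame offset: str to fp slot before, ldr after";
    }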
@@ -108,7 +137,7 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
RegisterAllocator register_allocator(this);
allocator_ = &register_allocator;
ASSERT(frame_ == NULL);
- frame_ = new VirtualFrame(this);
+ frame_ = new VirtualFrame();
cc_reg_ = al;
set_in_spilled_code(false);
{
@@ -133,13 +162,13 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
#endif
// Allocate space for locals and initialize them.
- frame_->AllocateStackSlots(scope_->num_stack_slots());
+ frame_->AllocateStackSlots();
// Initialize the function return target after the locals are set
// up, because it needs the expected frame height from the frame.
- function_return_.Initialize(this, JumpTarget::BIDIRECTIONAL);
+ function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
function_return_is_shadowed_ = false;
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
if (scope_->num_heap_slots() > 0) {
// Allocate local context.
// Get outer context and create a new context based on it.
@@ -148,7 +177,7 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
frame_->CallRuntime(Runtime::kNewContext, 1); // r0 holds the result
#ifdef DEBUG
- JumpTarget verified_true(this);
+ JumpTarget verified_true;
__ cmp(r0, Operand(cp));
verified_true.Branch(eq);
__ stop("NewContext: r0 is expected to be the same as cp");
@@ -288,9 +317,7 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
DeleteFrame();
// Process any deferred code using the register allocator.
- if (HasStackOverflow()) {
- ClearDeferred();
- } else {
+ if (!HasStackOverflow()) {
ProcessDeferred();
}
@@ -456,14 +483,14 @@ void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
int original_height = frame_->height();
#endif
ASSERT(!in_spilled_code());
- JumpTarget true_target(this);
- JumpTarget false_target(this);
+ JumpTarget true_target;
+ JumpTarget false_target;
LoadCondition(x, typeof_state, &true_target, &false_target, false);
if (has_cc()) {
// Convert cc_reg_ into a boolean value.
- JumpTarget loaded(this);
- JumpTarget materialize_true(this);
+ JumpTarget loaded;
+ JumpTarget materialize_true;
materialize_true.Branch(cc_reg_);
__ mov(r0, Operand(Factory::false_value()));
frame_->EmitPush(r0);
@@ -478,7 +505,7 @@ void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
if (true_target.is_linked() || false_target.is_linked()) {
// We have at least one condition value that has been "translated"
// into a branch, thus it needs to be loaded explicitly.
- JumpTarget loaded(this);
+ JumpTarget loaded;
if (frame_ != NULL) {
loaded.Jump(); // Don't lose the current TOS.
}
@@ -510,14 +537,14 @@ void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
void CodeGenerator::LoadGlobal() {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
__ ldr(r0, GlobalObject());
frame_->EmitPush(r0);
}
void CodeGenerator::LoadGlobalReceiver(Register scratch) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
__ ldr(scratch, ContextOperand(cp, Context::GLOBAL_INDEX));
__ ldr(scratch,
FieldMemOperand(scratch, GlobalObject::kGlobalReceiverOffset));
@@ -529,7 +556,7 @@ void CodeGenerator::LoadGlobalReceiver(Register scratch) {
// that we have the INSIDE_TYPEOF typeof state. => Need to handle global
// variables w/o reference errors elsewhere.
void CodeGenerator::LoadTypeofExpression(Expression* x) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Variable* variable = x->AsVariableProxy()->AsVariable();
if (variable != NULL && !variable->is_this() && variable->is_global()) {
// NOTE: This is somewhat nasty. We force the compiler to load
@@ -559,7 +586,7 @@ Reference::~Reference() {
void CodeGenerator::LoadReference(Reference* ref) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ LoadReference");
Expression* e = ref->expression();
Property* property = e->AsProperty();
@@ -602,7 +629,7 @@ void CodeGenerator::LoadReference(Reference* ref) {
void CodeGenerator::UnloadReference(Reference* ref) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
// Pop a reference from the stack while preserving TOS.
Comment cmnt(masm_, "[ UnloadReference");
int size = ref->size();
@@ -619,7 +646,7 @@ void CodeGenerator::UnloadReference(Reference* ref) {
// may jump to 'false_target' in case the register converts to 'false'.
void CodeGenerator::ToBoolean(JumpTarget* true_target,
JumpTarget* false_target) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
// Note: The generated code snippet does not change stack variables.
// Only the condition code should be set.
frame_->EmitPop(r0);
@@ -701,7 +728,7 @@ class GenericBinaryOpStub : public CodeStub {
void CodeGenerator::GenericBinaryOperation(Token::Value op,
OverwriteMode overwrite_mode) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
// sp[0] : y
// sp[1] : x
// result : r0
@@ -756,13 +783,11 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
class DeferredInlineSmiOperation: public DeferredCode {
public:
- DeferredInlineSmiOperation(CodeGenerator* generator,
- Token::Value op,
+ DeferredInlineSmiOperation(Token::Value op,
int value,
bool reversed,
OverwriteMode overwrite_mode)
- : DeferredCode(generator),
- op_(op),
+ : op_(op),
value_(value),
reversed_(reversed),
overwrite_mode_(overwrite_mode) {
@@ -780,17 +805,13 @@ class DeferredInlineSmiOperation: public DeferredCode {
void DeferredInlineSmiOperation::Generate() {
- enter()->Bind();
- VirtualFrame::SpilledScope spilled_scope(generator());
-
switch (op_) {
case Token::ADD: {
+ // Revert optimistic add.
if (reversed_) {
- // revert optimistic add
__ sub(r0, r0, Operand(Smi::FromInt(value_)));
__ mov(r1, Operand(Smi::FromInt(value_)));
} else {
- // revert optimistic add
__ sub(r1, r0, Operand(Smi::FromInt(value_)));
__ mov(r0, Operand(Smi::FromInt(value_)));
}
@@ -798,8 +819,8 @@ void DeferredInlineSmiOperation::Generate() {
}
case Token::SUB: {
+ // Revert optimistic sub.
if (reversed_) {
- // revert optimistic sub
__ rsb(r0, r0, Operand(Smi::FromInt(value_)));
__ mov(r1, Operand(Smi::FromInt(value_)));
} else {
@@ -828,24 +849,19 @@ void DeferredInlineSmiOperation::Generate() {
__ mov(r1, Operand(r0));
__ mov(r0, Operand(Smi::FromInt(value_)));
} else {
- UNREACHABLE(); // should have been handled in SmiOperation
+ UNREACHABLE(); // Should have been handled in SmiOperation.
}
break;
}
default:
- // other cases should have been handled before this point.
+ // Other cases should have been handled before this point.
UNREACHABLE();
break;
}
- GenericBinaryOpStub igostub(op_, overwrite_mode_);
- Result arg0 = generator()->allocator()->Allocate(r1);
- ASSERT(arg0.is_valid());
- Result arg1 = generator()->allocator()->Allocate(r0);
- ASSERT(arg1.is_valid());
- generator()->frame()->CallStub(&igostub, &arg0, &arg1);
- exit_.Jump();
+ GenericBinaryOpStub stub(op_, overwrite_mode_);
+ __ CallStub(&stub);
}
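The deferred path works because optimistic Smi addition is exactly reversible: Smis carry a zero tag bit, so adding tagged values adds the untagged ones, and subtracting the constant back recovers the original operand even after the smi check fails. A self-contained worked example:

    // Worked example of the revert in DeferredInlineSmiOperation::Generate.
    // V8 Smis keep a 0 tag bit in the LSB, so Smi(v) == v << 1 here.
    #include <cstdint>
    #include <cassert>

    int32_t SmiFromInt(int32_t v) { return v << 1; }

    int main() {
      int32_t x = SmiFromInt(5);          // operand already in a register
      int32_t k = SmiFromInt(7);          // literal smi constant
      int32_t optimistic = x + k;         // __ add(r0, r0, Operand(value), SetCC)
      // If the add overflowed or x was not a smi, the deferred path runs:
      int32_t reverted = optimistic - k;  // __ sub(r1, r0, Operand(...))
      assert(reverted == x);              // original operand recovered
      return 0;
    }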
@@ -853,7 +869,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
Handle<Object> value,
bool reversed,
OverwriteMode mode) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
// NOTE: This is an attempt to inline (a bit) more of the code for
// some possible smi operations (like + and -) when (at least) one
// of the operands is a literal smi. With this optimization, the
@@ -865,34 +881,34 @@ void CodeGenerator::SmiOperation(Token::Value op,
int int_value = Smi::cast(*value)->value();
- JumpTarget exit(this);
+ JumpTarget exit;
frame_->EmitPop(r0);
switch (op) {
case Token::ADD: {
DeferredCode* deferred =
- new DeferredInlineSmiOperation(this, op, int_value, reversed, mode);
+ new DeferredInlineSmiOperation(op, int_value, reversed, mode);
__ add(r0, r0, Operand(value), SetCC);
- deferred->enter()->Branch(vs);
+ deferred->Branch(vs);
__ tst(r0, Operand(kSmiTagMask));
- deferred->enter()->Branch(ne);
+ deferred->Branch(ne);
deferred->BindExit();
break;
}
case Token::SUB: {
DeferredCode* deferred =
- new DeferredInlineSmiOperation(this, op, int_value, reversed, mode);
+ new DeferredInlineSmiOperation(op, int_value, reversed, mode);
- if (!reversed) {
- __ sub(r0, r0, Operand(value), SetCC);
- } else {
+ if (reversed) {
__ rsb(r0, r0, Operand(value), SetCC);
+ } else {
+ __ sub(r0, r0, Operand(value), SetCC);
}
- deferred->enter()->Branch(vs);
+ deferred->Branch(vs);
__ tst(r0, Operand(kSmiTagMask));
- deferred->enter()->Branch(ne);
+ deferred->Branch(ne);
deferred->BindExit();
break;
}
@@ -901,9 +917,9 @@ void CodeGenerator::SmiOperation(Token::Value op,
case Token::BIT_XOR:
case Token::BIT_AND: {
DeferredCode* deferred =
- new DeferredInlineSmiOperation(this, op, int_value, reversed, mode);
+ new DeferredInlineSmiOperation(op, int_value, reversed, mode);
__ tst(r0, Operand(kSmiTagMask));
- deferred->enter()->Branch(ne);
+ deferred->Branch(ne);
switch (op) {
case Token::BIT_OR: __ orr(r0, r0, Operand(value)); break;
case Token::BIT_XOR: __ eor(r0, r0, Operand(value)); break;
@@ -926,16 +942,16 @@ void CodeGenerator::SmiOperation(Token::Value op,
} else {
int shift_value = int_value & 0x1f; // least significant 5 bits
DeferredCode* deferred =
- new DeferredInlineSmiOperation(this, op, shift_value, false, mode);
+ new DeferredInlineSmiOperation(op, shift_value, false, mode);
__ tst(r0, Operand(kSmiTagMask));
- deferred->enter()->Branch(ne);
+ deferred->Branch(ne);
__ mov(r2, Operand(r0, ASR, kSmiTagSize)); // remove tags
switch (op) {
case Token::SHL: {
__ mov(r2, Operand(r2, LSL, shift_value));
// check that the *unsigned* result fits in a smi
__ add(r3, r2, Operand(0x40000000), SetCC);
- deferred->enter()->Branch(mi);
+ deferred->Branch(mi);
break;
}
case Token::SHR: {
@@ -950,7 +966,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
// smi tagging these two cases can only happen with shifts
// by 0 or 1 when handed a valid smi
__ and_(r3, r2, Operand(0xc0000000), SetCC);
- deferred->enter()->Branch(ne);
+ deferred->Branch(ne);
break;
}
case Token::SAR: {
@@ -987,7 +1003,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
void CodeGenerator::Comparison(Condition cc, bool strict) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
// sp[0] : y
// sp[1] : x
// result : cc register
@@ -995,8 +1011,8 @@ void CodeGenerator::Comparison(Condition cc, bool strict) {
// Strict only makes sense for equality comparisons.
ASSERT(!strict || cc == eq);
- JumpTarget exit(this);
- JumpTarget smi(this);
+ JumpTarget exit;
+ JumpTarget smi;
// Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
if (cc == gt || cc == le) {
cc = ReverseCondition(cc);
@@ -1057,12 +1073,14 @@ void CodeGenerator::Comparison(Condition cc, bool strict) {
class CallFunctionStub: public CodeStub {
public:
- explicit CallFunctionStub(int argc) : argc_(argc) {}
+ CallFunctionStub(int argc, InLoopFlag in_loop)
+ : argc_(argc), in_loop_(in_loop) {}
void Generate(MacroAssembler* masm);
private:
int argc_;
+ InLoopFlag in_loop_;
#if defined(DEBUG)
void Print() { PrintF("CallFunctionStub (argc %d)\n", argc_); }
@@ -1070,13 +1088,14 @@ class CallFunctionStub: public CodeStub {
Major MajorKey() { return CallFunction; }
int MinorKey() { return argc_; }
+ InLoopFlag InLoop() { return in_loop_; }
};
// Call the function on the stack with the given arguments.
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
int position) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
// Push the arguments ("left-to-right") on the stack.
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
@@ -1087,7 +1106,8 @@ void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
CodeForSourcePosition(position);
// Use the shared code stub to call the function.
- CallFunctionStub call_function(arg_count);
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub call_function(arg_count, in_loop);
frame_->CallStub(&call_function, arg_count + 1);
// Restore context and pop function from the stack.
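The loop_nesting() > 0 test now recurs at every call site in this file; a hypothetical one-line helper expressing the repeated pattern (not part of the change):

    // Invented helper capturing the repeated in-loop selection above.
    InLoopFlag CodeGenerator::ComputeInLoopFlag() {
      return loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
    }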
@@ -1097,7 +1117,7 @@ void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
ASSERT(has_cc());
Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
target->Branch(cc);
@@ -1106,7 +1126,7 @@ void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
void CodeGenerator::CheckStack() {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
if (FLAG_check_stack) {
Comment cmnt(masm_, "[ check stack");
StackCheckStub stub;
@@ -1141,7 +1161,7 @@ void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
VisitAndSpill(statements->at(i));
}
@@ -1153,10 +1173,10 @@ void CodeGenerator::VisitBlock(Block* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Block");
CodeForStatementPosition(node);
- node->break_target()->Initialize(this);
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
VisitStatementsAndSpill(node->statements());
if (node->break_target()->is_linked()) {
node->break_target()->Bind();
@@ -1167,7 +1187,7 @@ void CodeGenerator::VisitBlock(Block* node) {
void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
__ mov(r0, Operand(pairs));
frame_->EmitPush(r0);
frame_->EmitPush(cp);
@@ -1182,7 +1202,7 @@ void CodeGenerator::VisitDeclaration(Declaration* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Declaration");
CodeForStatementPosition(node);
Variable* var = node->proxy()->var();
@@ -1254,7 +1274,7 @@ void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ ExpressionStatement");
CodeForStatementPosition(node);
Expression* expression = node->expression();
@@ -1269,7 +1289,7 @@ void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "// EmptyStatement");
CodeForStatementPosition(node);
// nothing to do
@@ -1281,7 +1301,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ IfStatement");
// Generate different code depending on which parts of the if statement
// are present or not.
@@ -1290,11 +1310,11 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
CodeForStatementPosition(node);
- JumpTarget exit(this);
+ JumpTarget exit;
if (has_then_stm && has_else_stm) {
Comment cmnt(masm_, "[ IfThenElse");
- JumpTarget then(this);
- JumpTarget else_(this);
+ JumpTarget then;
+ JumpTarget else_;
// if (cond)
LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
&then, &else_, true);
@@ -1318,7 +1338,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
} else if (has_then_stm) {
Comment cmnt(masm_, "[ IfThen");
ASSERT(!has_else_stm);
- JumpTarget then(this);
+ JumpTarget then;
// if (cond)
LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
&then, &exit, true);
@@ -1334,7 +1354,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
} else if (has_else_stm) {
Comment cmnt(masm_, "[ IfElse");
ASSERT(!has_then_stm);
- JumpTarget else_(this);
+ JumpTarget else_;
// if (!cond)
LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
&exit, &else_, true);
@@ -1371,7 +1391,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ ContinueStatement");
CodeForStatementPosition(node);
node->target()->continue_target()->Jump();
@@ -1379,7 +1399,7 @@ void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ BreakStatement");
CodeForStatementPosition(node);
node->target()->break_target()->Jump();
@@ -1387,7 +1407,7 @@ void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ ReturnStatement");
if (function_return_is_shadowed_) {
@@ -1414,7 +1434,7 @@ void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ WithEnterStatement");
CodeForStatementPosition(node);
LoadAndSpill(node->expression());
@@ -1424,7 +1444,7 @@ void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
frame_->CallRuntime(Runtime::kPushContext, 1);
}
#ifdef DEBUG
- JumpTarget verified_true(this);
+ JumpTarget verified_true;
__ cmp(r0, Operand(cp));
verified_true.Branch(eq);
__ stop("PushContext: r0 is expected to be the same as cp");
@@ -1440,7 +1460,7 @@ void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ WithExitStatement");
CodeForStatementPosition(node);
// Pop context.
@@ -1467,9 +1487,9 @@ void CodeGenerator::GenerateFastCaseSwitchJumpTable(
Label* default_label,
Vector<Label*> case_targets,
Vector<Label> case_labels) {
- VirtualFrame::SpilledScope spilled_scope(this);
- JumpTarget setup_default(this);
- JumpTarget is_smi(this);
+ VirtualFrame::SpilledScope spilled_scope;
+ JumpTarget setup_default;
+ JumpTarget is_smi;
// A non-null default label pointer indicates a default case among
// the case labels. Otherwise we use the break target as a
@@ -1529,8 +1549,6 @@ void CodeGenerator::GenerateFastCaseSwitchJumpTable(
if (node->break_target()->is_linked()) {
node->break_target()->Bind();
}
-
- delete start_frame;
}
@@ -1538,10 +1556,10 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ SwitchStatement");
CodeForStatementPosition(node);
- node->break_target()->Initialize(this);
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
LoadAndSpill(node->tag());
if (TryGenerateFastCaseSwitchStatement(node)) {
@@ -1549,10 +1567,10 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
return;
}
- JumpTarget next_test(this);
- JumpTarget fall_through(this);
- JumpTarget default_entry(this);
- JumpTarget default_exit(this, JumpTarget::BIDIRECTIONAL);
+ JumpTarget next_test;
+ JumpTarget fall_through;
+ JumpTarget default_entry;
+ JumpTarget default_exit(JumpTarget::BIDIRECTIONAL);
ZoneList<CaseClause*>* cases = node->cases();
int length = cases->length();
CaseClause* default_clause = NULL;
@@ -1632,10 +1650,10 @@ void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ LoopStatement");
CodeForStatementPosition(node);
- node->break_target()->Initialize(this);
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
// Simple condition analysis. ALWAYS_TRUE and ALWAYS_FALSE represent a
// known result for the test expression, with no side effects.
@@ -1656,19 +1674,19 @@ void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
switch (node->type()) {
case LoopStatement::DO_LOOP: {
- JumpTarget body(this, JumpTarget::BIDIRECTIONAL);
+ JumpTarget body(JumpTarget::BIDIRECTIONAL);
// Label the top of the loop for the backward CFG edge. If the test
// is always true we can use the continue target, and if the test is
// always false there is no need.
if (info == ALWAYS_TRUE) {
- node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
node->continue_target()->Bind();
} else if (info == ALWAYS_FALSE) {
- node->continue_target()->Initialize(this);
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
} else {
ASSERT(info == DONT_KNOW);
- node->continue_target()->Initialize(this);
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
body.Bind();
}
@@ -1715,11 +1733,11 @@ void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
// Label the top of the loop with the continue target for the backward
// CFG edge.
- node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
node->continue_target()->Bind();
if (info == DONT_KNOW) {
- JumpTarget body(this);
+ JumpTarget body;
LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
&body, node->break_target(), true);
if (has_valid_frame()) {
@@ -1745,7 +1763,7 @@ void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
}
case LoopStatement::FOR_LOOP: {
- JumpTarget loop(this, JumpTarget::BIDIRECTIONAL);
+ JumpTarget loop(JumpTarget::BIDIRECTIONAL);
if (node->init() != NULL) {
VisitAndSpill(node->init());
@@ -1757,16 +1775,16 @@ void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
// If there is no update statement, label the top of the loop with the
// continue target, otherwise with the loop target.
if (node->next() == NULL) {
- node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
node->continue_target()->Bind();
} else {
- node->continue_target()->Initialize(this);
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
loop.Bind();
}
// If the test is always true, there is no need to compile it.
if (info == DONT_KNOW) {
- JumpTarget body(this);
+ JumpTarget body;
LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
&body, node->break_target(), true);
if (has_valid_frame()) {
@@ -1822,16 +1840,16 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
int original_height = frame_->height();
#endif
ASSERT(!in_spilled_code());
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ ForInStatement");
CodeForStatementPosition(node);
- JumpTarget primitive(this);
- JumpTarget jsobject(this);
- JumpTarget fixed_array(this);
- JumpTarget entry(this, JumpTarget::BIDIRECTIONAL);
- JumpTarget end_del_check(this);
- JumpTarget exit(this);
+ JumpTarget primitive;
+ JumpTarget jsobject;
+ JumpTarget fixed_array;
+ JumpTarget entry(JumpTarget::BIDIRECTIONAL);
+ JumpTarget end_del_check;
+ JumpTarget exit;
// Get the object to enumerate over (converted to JSObject).
LoadAndSpill(node->enumerable());
@@ -1916,8 +1934,8 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// sp[4] : enumerable
// Grab the current frame's height for the break and continue
// targets only after all the state is pushed on the frame.
- node->break_target()->Initialize(this);
- node->continue_target()->Initialize(this);
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
__ ldr(r0, frame_->ElementAt(0)); // load the current count
__ ldr(r1, frame_->ElementAt(1)); // load the length
@@ -2016,12 +2034,12 @@ void CodeGenerator::VisitTryCatch(TryCatch* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ TryCatch");
CodeForStatementPosition(node);
- JumpTarget try_block(this);
- JumpTarget exit(this);
+ JumpTarget try_block;
+ JumpTarget exit;
try_block.Call();
// --- Catch block ---
@@ -2132,7 +2150,6 @@ void CodeGenerator::VisitTryCatch(TryCatch* node) {
}
shadows[i]->other_target()->Jump();
}
- delete shadows[i];
}
exit.Bind();
@@ -2144,7 +2161,7 @@ void CodeGenerator::VisitTryFinally(TryFinally* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ TryFinally");
CodeForStatementPosition(node);
@@ -2153,8 +2170,8 @@ void CodeGenerator::VisitTryFinally(TryFinally* node) {
// break/continue from within the try block.
enum { FALLING, THROWING, JUMPING };
- JumpTarget try_block(this);
- JumpTarget finally_block(this);
+ JumpTarget try_block;
+ JumpTarget finally_block;
try_block.Call();
@@ -2299,7 +2316,7 @@ void CodeGenerator::VisitTryFinally(TryFinally* node) {
JumpTarget* original = shadows[i]->other_target();
__ cmp(r2, Operand(Smi::FromInt(JUMPING + i)));
if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
- JumpTarget skip(this);
+ JumpTarget skip;
skip.Branch(ne);
frame_->PrepareForReturn();
original->Jump();
@@ -2308,12 +2325,11 @@ void CodeGenerator::VisitTryFinally(TryFinally* node) {
original->Branch(eq);
}
}
- delete shadows[i];
}
if (has_valid_frame()) {
// Check if we need to rethrow the exception.
- JumpTarget exit(this);
+ JumpTarget exit;
__ cmp(r2, Operand(Smi::FromInt(THROWING)));
exit.Branch(ne);
@@ -2332,7 +2348,7 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ DebuggerStatament");
CodeForStatementPosition(node);
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -2344,7 +2360,7 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
ASSERT(boilerplate->IsBoilerplate());
// Push the boilerplate on the stack.
@@ -2362,7 +2378,7 @@ void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
@@ -2382,7 +2398,7 @@ void CodeGenerator::VisitFunctionBoilerplateLiteral(
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
InstantiateBoilerplate(node->boilerplate());
ASSERT(frame_->height() == original_height + 1);
@@ -2393,11 +2409,11 @@ void CodeGenerator::VisitConditional(Conditional* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Conditional");
- JumpTarget then(this);
- JumpTarget else_(this);
- JumpTarget exit(this);
+ JumpTarget then;
+ JumpTarget else_;
+ JumpTarget exit;
LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
&then, &else_, true);
Branch(false, &else_);
@@ -2412,12 +2428,12 @@ void CodeGenerator::VisitConditional(Conditional* node) {
void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
if (slot->type() == Slot::LOOKUP) {
ASSERT(slot->var()->is_dynamic());
- JumpTarget slow(this);
- JumpTarget done(this);
+ JumpTarget slow;
+ JumpTarget done;
// Generate fast-case code for variables that might be shadowed by
// eval-introduced variables. Eval is used a lot without
@@ -2565,7 +2581,7 @@ void CodeGenerator::VisitSlot(Slot* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Slot");
LoadFromSlot(node, typeof_state());
ASSERT(frame_->height() == original_height + 1);
@@ -2576,7 +2592,7 @@ void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ VariableProxy");
Variable* var = node->var();
@@ -2596,7 +2612,7 @@ void CodeGenerator::VisitLiteral(Literal* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Literal");
__ mov(r0, Operand(node->handle()));
frame_->EmitPush(r0);
@@ -2608,7 +2624,7 @@ void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ RexExp Literal");
// Retrieve the literal array and check the allocated entry.
@@ -2624,7 +2640,7 @@ void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
__ ldr(r2, FieldMemOperand(r1, literal_offset));
- JumpTarget done(this);
+ JumpTarget done;
__ cmp(r2, Operand(Factory::undefined_value()));
done.Branch(ne);
@@ -2653,8 +2669,7 @@ void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
// therefore context dependent.
class DeferredObjectLiteral: public DeferredCode {
public:
- DeferredObjectLiteral(CodeGenerator* generator, ObjectLiteral* node)
- : DeferredCode(generator), node_(node) {
+ explicit DeferredObjectLiteral(ObjectLiteral* node) : node_(node) {
set_comment("[ DeferredObjectLiteral");
}
@@ -2667,26 +2682,20 @@ class DeferredObjectLiteral: public DeferredCode {
void DeferredObjectLiteral::Generate() {
// Argument is passed in r1.
- enter()->Bind();
- VirtualFrame::SpilledScope spilled_scope(generator());
// If the entry is undefined we call the runtime system to compute
// the literal.
-
- VirtualFrame* frame = generator()->frame();
// Literal array (0).
- frame->EmitPush(r1);
+ __ push(r1);
// Literal index (1).
__ mov(r0, Operand(Smi::FromInt(node_->literal_index())));
- frame->EmitPush(r0);
+ __ push(r0);
// Constant properties (2).
__ mov(r0, Operand(node_->constant_properties()));
- frame->EmitPush(r0);
- Result boilerplate =
- frame->CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
- __ mov(r2, Operand(boilerplate.reg()));
+ __ push(r0);
+ __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+ __ mov(r2, Operand(r0));
// Result is returned in r2.
- exit_.Jump();
}
@@ -2694,10 +2703,10 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ ObjectLiteral");
- DeferredObjectLiteral* deferred = new DeferredObjectLiteral(this, node);
+ DeferredObjectLiteral* deferred = new DeferredObjectLiteral(node);
// Retrieve the literal array and check the allocated entry.
@@ -2715,7 +2724,7 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
// Check whether we need to materialize the object literal boilerplate.
// If so, jump to the deferred code.
__ cmp(r2, Operand(Factory::undefined_value()));
- deferred->enter()->Branch(eq);
+ deferred->Branch(eq);
deferred->BindExit();
// Push the object literal boilerplate.
@@ -2782,8 +2791,7 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
// therefore context dependent.
class DeferredArrayLiteral: public DeferredCode {
public:
- DeferredArrayLiteral(CodeGenerator* generator, ArrayLiteral* node)
- : DeferredCode(generator), node_(node) {
+ explicit DeferredArrayLiteral(ArrayLiteral* node) : node_(node) {
set_comment("[ DeferredArrayLiteral");
}
@@ -2796,26 +2804,20 @@ class DeferredArrayLiteral: public DeferredCode {
void DeferredArrayLiteral::Generate() {
// Argument is passed in r1.
- enter()->Bind();
- VirtualFrame::SpilledScope spilled_scope(generator());
// If the entry is undefined we call the runtime system to compute
// the literal.
-
- VirtualFrame* frame = generator()->frame();
// Literal array (0).
- frame->EmitPush(r1);
+ __ push(r1);
// Literal index (1).
__ mov(r0, Operand(Smi::FromInt(node_->literal_index())));
- frame->EmitPush(r0);
+ __ push(r0);
// Constant properties (2).
__ mov(r0, Operand(node_->literals()));
- frame->EmitPush(r0);
- Result boilerplate =
- frame->CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
- __ mov(r2, Operand(boilerplate.reg()));
+ __ push(r0);
+ __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
+ __ mov(r2, Operand(r0));
// Result is returned in r2.
- exit_.Jump();
}
@@ -2823,10 +2825,10 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ ArrayLiteral");
- DeferredArrayLiteral* deferred = new DeferredArrayLiteral(this, node);
+ DeferredArrayLiteral* deferred = new DeferredArrayLiteral(node);
// Retrieve the literal array and check the allocated entry.
@@ -2844,7 +2846,7 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
// Check whether we need to materialize the object literal boilerplate.
// If so, jump to the deferred code.
__ cmp(r2, Operand(Factory::undefined_value()));
- deferred->enter()->Branch(eq);
+ deferred->Branch(eq);
deferred->BindExit();
// Push the object literal boilerplate.
@@ -2897,7 +2899,7 @@ void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
int original_height = frame_->height();
#endif
ASSERT(!in_spilled_code());
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
// Call runtime routine to allocate the catch extension object and
// assign the exception value to the catch variable.
Comment cmnt(masm_, "[ CatchExtensionObject");
@@ -2914,7 +2916,7 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Assignment");
CodeForStatementPosition(node);
@@ -2982,7 +2984,7 @@ void CodeGenerator::VisitThrow(Throw* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Throw");
LoadAndSpill(node->exception());
@@ -2997,7 +2999,7 @@ void CodeGenerator::VisitProperty(Property* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Property");
{ Reference property(this, node);
@@ -3011,7 +3013,7 @@ void CodeGenerator::VisitCall(Call* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Call");
ZoneList<Expression*>* args = node->arguments();
@@ -3054,7 +3056,8 @@ void CodeGenerator::VisitCall(Call* node) {
}
// Setup the receiver register and call the IC initialization code.
- Handle<Code> stub = ComputeCallInitialize(arg_count);
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
CodeForSourcePosition(node->position());
frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
arg_count + 1);
@@ -3105,7 +3108,8 @@ void CodeGenerator::VisitCall(Call* node) {
}
// Set the receiver register and call the IC initialization code.
- Handle<Code> stub = ComputeCallInitialize(arg_count);
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
CodeForSourcePosition(node->position());
frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
__ ldr(cp, frame_->Context());
@@ -3160,7 +3164,7 @@ void CodeGenerator::VisitCallEval(CallEval* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ CallEval");
// In a call to eval, we first call %ResolvePossiblyDirectEval to resolve
@@ -3203,7 +3207,8 @@ void CodeGenerator::VisitCallEval(CallEval* node) {
// Call the function.
CodeForSourcePosition(node->position());
- CallFunctionStub call_function(arg_count);
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub call_function(arg_count, in_loop);
frame_->CallStub(&call_function, arg_count + 1);
__ ldr(cp, frame_->Context());
@@ -3218,7 +3223,7 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ CallNew");
CodeForStatementPosition(node);
@@ -3268,9 +3273,9 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 1);
- JumpTarget leave(this);
+ JumpTarget leave;
LoadAndSpill(args->at(0));
frame_->EmitPop(r0); // r0 contains object.
// if (object->IsSmi()) return the object.
@@ -3290,9 +3295,9 @@ void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 2);
- JumpTarget leave(this);
+ JumpTarget leave;
LoadAndSpill(args->at(0)); // Load the object.
LoadAndSpill(args->at(1)); // Load the value.
frame_->EmitPop(r0); // r0 contains value
@@ -3318,7 +3323,7 @@ void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 1);
LoadAndSpill(args->at(0));
frame_->EmitPop(r0);
@@ -3328,7 +3333,7 @@ void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
// See comment in CodeGenerator::GenerateLog in codegen-ia32.cc.
ASSERT_EQ(args->length(), 3);
#ifdef ENABLE_LOGGING_AND_PROFILING
@@ -3344,7 +3349,7 @@ void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 1);
LoadAndSpill(args->at(0));
frame_->EmitPop(r0);
@@ -3357,7 +3362,7 @@ void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
// It is not yet implemented on ARM, so it always goes to the slow case.
void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 2);
__ mov(r0, Operand(Factory::undefined_value()));
frame_->EmitPush(r0);
@@ -3365,10 +3370,10 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 1);
LoadAndSpill(args->at(0));
- JumpTarget answer(this);
+ JumpTarget answer;
// We need the CC bits to come out as not_equal in the case where the
// object is a smi. This can't be done with the usual test opcode so
// we use XOR to get the right CC bits.
@@ -3387,7 +3392,7 @@ void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 0);
// Seed the result with the formal parameters count, which will be used
@@ -3402,7 +3407,7 @@ void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 1);
// Satisfy contract with ArgumentsAccessStub:
@@ -3419,7 +3424,7 @@ void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 2);
// Load the two objects into registers and perform the comparison.
@@ -3436,7 +3441,7 @@ void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
if (CheckForInlineRuntimeCall(node)) {
ASSERT((has_cc() && frame_->height() == original_height) ||
(!has_cc() && frame_->height() == original_height + 1));
@@ -3465,7 +3470,8 @@ void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
if (function == NULL) {
// Call the JS runtime function.
- Handle<Code> stub = ComputeCallInitialize(arg_count);
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
__ ldr(cp, frame_->Context());
frame_->Drop();
@@ -3483,7 +3489,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ UnaryOperation");
Token::Value op = node->op();
@@ -3572,8 +3578,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
case Token::BIT_NOT: {
// smi check
- JumpTarget smi_label(this);
- JumpTarget continue_label(this);
+ JumpTarget smi_label;
+ JumpTarget continue_label;
__ tst(r0, Operand(kSmiTagMask));
smi_label.Branch(eq);
@@ -3599,7 +3605,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
case Token::ADD: {
// Smi check.
- JumpTarget continue_label(this);
+ JumpTarget continue_label;
__ tst(r0, Operand(kSmiTagMask));
continue_label.Branch(eq);
frame_->EmitPush(r0);
@@ -3624,7 +3630,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ CountOperation");
bool is_postfix = node->is_postfix();
@@ -3653,8 +3659,8 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
target.GetValueAndSpill(NOT_INSIDE_TYPEOF);
frame_->EmitPop(r0);
- JumpTarget slow(this);
- JumpTarget exit(this);
+ JumpTarget slow;
+ JumpTarget exit;
// Load the value (1) into register r1.
__ mov(r1, Operand(Smi::FromInt(1)));
@@ -3726,7 +3732,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ BinaryOperation");
Token::Value op = node->op();
@@ -3743,7 +3749,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
// of compiling the binary operation is materialized or not.
if (op == Token::AND) {
- JumpTarget is_true(this);
+ JumpTarget is_true;
LoadConditionAndSpill(node->left(),
NOT_INSIDE_TYPEOF,
&is_true,
@@ -3761,8 +3767,8 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
false);
} else {
- JumpTarget pop_and_continue(this);
- JumpTarget exit(this);
+ JumpTarget pop_and_continue;
+ JumpTarget exit;
__ ldr(r0, frame_->Top()); // dup the stack top
frame_->EmitPush(r0);
@@ -3785,7 +3791,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
}
} else if (op == Token::OR) {
- JumpTarget is_false(this);
+ JumpTarget is_false;
LoadConditionAndSpill(node->left(),
NOT_INSIDE_TYPEOF,
true_target(),
@@ -3803,8 +3809,8 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
false);
} else {
- JumpTarget pop_and_continue(this);
- JumpTarget exit(this);
+ JumpTarget pop_and_continue;
+ JumpTarget exit;
__ ldr(r0, frame_->Top());
frame_->EmitPush(r0);
@@ -3876,7 +3882,7 @@ void CodeGenerator::VisitThisFunction(ThisFunction* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
__ ldr(r0, frame_->Function());
frame_->EmitPush(r0);
ASSERT(frame_->height() == original_height + 1);
@@ -3887,7 +3893,7 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ CompareOperation");
// Get the expressions from the node.
@@ -4245,7 +4251,7 @@ void Reference::SetValue(InitState init_state) {
} else {
ASSERT(!slot->var()->is_dynamic());
- JumpTarget exit(cgen_);
+ JumpTarget exit;
if (init_state == CONST_INIT) {
ASSERT(slot->var()->mode() == Variable::CONST);
// Only the first const initialization must be executed (the slot
@@ -4335,6 +4341,45 @@ void Reference::SetValue(InitState init_state) {
}
+static void AllocateHeapNumber(
+ MacroAssembler* masm,
+ Label* need_gc, // Jump here if young space is full.
+ Register result_reg, // The tagged address of the new heap number.
+ Register allocation_top_addr_reg, // A scratch register.
+ Register scratch2) { // Another scratch register.
+ ExternalReference allocation_top =
+ ExternalReference::new_space_allocation_top_address();
+ ExternalReference allocation_limit =
+ ExternalReference::new_space_allocation_limit_address();
+
+ // allocation_top_addr_reg := the address of the allocation top variable.
+ __ mov(allocation_top_addr_reg, Operand(allocation_top));
+ // result_reg := the old allocation top.
+ __ ldr(result_reg, MemOperand(allocation_top_addr_reg));
+ // scratch2 := the address of the allocation limit.
+ __ mov(scratch2, Operand(allocation_limit));
+ // scratch2 := the allocation limit.
+ __ ldr(scratch2, MemOperand(scratch2));
+ // result_reg := the new allocation top.
+ __ add(result_reg, result_reg, Operand(HeapNumber::kSize));
+ // Compare the new allocation top against the limit.
+ __ cmp(result_reg, Operand(scratch2));
+ // Branch if out of space in young generation.
+ __ b(hi, need_gc);
+ // Store new allocation top.
+ __ str(result_reg, MemOperand(allocation_top_addr_reg));
+ // Tag and adjust back to start of new object.
+ __ sub(result_reg, result_reg, Operand(HeapNumber::kSize - kHeapObjectTag));
+ // Get heap number map into scratch2.
+ __ mov(scratch2, Operand(Factory::heap_number_map()));
+ // Store heap number map in new object.
+ __ str(scratch2, FieldMemOperand(result_reg, HeapObject::kMapOffset));
+}
+
+
+// We fall into this code if the operands were Smis, but the result was
+// not (e.g. on overflow). We branch into this code (at the not_smi
+// label) if the operands were not both Smis.
static void HandleBinaryOpSlowCases(MacroAssembler* masm,
Label* not_smi,
const Builtins::JavaScript& builtin,
@@ -4342,73 +4387,74 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
int swi_number,
OverwriteMode mode) {
Label slow;
- if (mode == NO_OVERWRITE) {
- __ bind(not_smi);
- }
__ bind(&slow);
__ push(r1);
__ push(r0);
__ mov(r0, Operand(1)); // Set number of arguments.
__ InvokeBuiltin(builtin, JUMP_JS); // Tail call.
- // Could it be a double-double op? If we already have a place to put
- // the answer then we can do the op and skip the builtin and runtime call.
- if (mode != NO_OVERWRITE) {
- __ bind(not_smi);
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &slow); // We can't handle a Smi-double combination yet.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &slow); // We can't handle a Smi-double combination yet.
- // Get map of r0 into r2.
- __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- // Get type of r0 into r3.
- __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- __ cmp(r3, Operand(HEAP_NUMBER_TYPE));
- __ b(ne, &slow);
- // Get type of r1 into r3.
- __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
- // Check they are both the same map (heap number map).
- __ cmp(r2, r3);
- __ b(ne, &slow);
- // Both are doubles.
- // Calling convention says that second double is in r2 and r3.
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
+ __ bind(not_smi);
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &slow); // We can't handle a Smi-double combination yet.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &slow); // We can't handle a Smi-double combination yet.
+ // Get map of r0 into r2.
+ __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+ // Get type of r0 into r3.
+ __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+ __ cmp(r3, Operand(HEAP_NUMBER_TYPE));
+ __ b(ne, &slow);
+ // Get type of r1 into r3.
+ __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+ // Check they are both the same map (heap number map).
+ __ cmp(r2, r3);
+ __ b(ne, &slow);
+ // Both are doubles.
+ // Calling convention says that second double is in r2 and r3.
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
+
+ if (mode == NO_OVERWRITE) {
+ // Get address of new heap number into r5.
+ AllocateHeapNumber(masm, &slow, r5, r6, r7);
__ push(lr);
- if (mode == OVERWRITE_LEFT) {
- __ push(r1);
- } else {
- __ push(r0);
- }
- // Calling convention says that first double is in r0 and r1.
- __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
- __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
- // Call C routine that may not cause GC or other trouble.
- __ mov(r5, Operand(ExternalReference::double_fp_operation(operation)));
+ __ push(r5);
+ } else if (mode == OVERWRITE_LEFT) {
+ __ push(lr);
+ __ push(r1);
+ } else {
+ ASSERT(mode == OVERWRITE_RIGHT);
+ __ push(lr);
+ __ push(r0);
+ }
+ // Calling convention says that first double is in r0 and r1.
+ __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
+ __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
+ // Call C routine that may not cause GC or other trouble.
+ __ mov(r5, Operand(ExternalReference::double_fp_operation(operation)));
#if !defined(__arm__)
- // Notify the simulator that we are calling an add routine in C.
- __ swi(swi_number);
+ // Notify the simulator that we are calling an add routine in C.
+ __ swi(swi_number);
#else
- // Actually call the add routine written in C.
- __ Call(r5);
+ // Actually call the add routine written in C.
+ __ Call(r5);
#endif
- // Store answer in the overwritable heap number.
- __ pop(r4);
+ // Store answer in the overwritable heap number.
+ __ pop(r4);
#if !defined(__ARM_EABI__) && defined(__arm__)
- // Double returned in fp coprocessor register 0 and 1, encoded as register
- // cr8. Offsets must be divisible by 4 for coprocessor so we need to
- // substract the tag from r4.
- __ sub(r5, r4, Operand(kHeapObjectTag));
- __ stc(p1, cr8, MemOperand(r5, HeapNumber::kValueOffset));
+ // Double returned in fp coprocessor register 0 and 1, encoded as register
+ // cr8. Offsets must be divisible by 4 for the coprocessor, so we need
+ // to subtract the tag from r4.
+ __ sub(r5, r4, Operand(kHeapObjectTag));
+ __ stc(p1, cr8, MemOperand(r5, HeapNumber::kValueOffset));
#else
- // Double returned in fp coprocessor register 0 and 1.
- __ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset));
- __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + kPointerSize));
+ // Double returned in fp coprocessor register 0 and 1.
+ __ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset));
+ __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + kPointerSize));
#endif
- __ mov(r0, Operand(r4));
- // And we are done.
- __ pop(pc);
- }
+ __ mov(r0, Operand(r4));
+ // And we are done.
+ __ pop(pc);
}
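
The AllocateHeapNumber helper added above inlines a bump-pointer allocation in the young generation: bump the allocation top, compare against the limit, branch to the GC on overflow, then back up to the object start, tag it, and install the heap-number map. A minimal C++ sketch of the same control flow, under assumed constants (hypothetical globals stand in for the external references; the real code works on raw registers):

    #include <cstddef>

    static char* new_space_top;    // assumption: young-generation top
    static char* new_space_limit;  // assumption: young-generation limit
    static const std::size_t kHeapNumberSizeSketch = 12;   // assumed: map word + double
    static const std::ptrdiff_t kHeapObjectTagSketch = 1;  // assumed tag value

    // Returns a tagged pointer, or NULL when the caller must collect garbage.
    static void* TryAllocateHeapNumberSketch() {
      char* new_top = new_space_top + kHeapNumberSizeSketch;  // bump the top
      if (new_top > new_space_limit) return NULL;             // b(hi, need_gc)
      new_space_top = new_top;                                // store new top
      // Back up to the start of the object and tag it; the generated code
      // then stores the heap-number map at the map offset.
      return new_top - kHeapNumberSizeSketch + kHeapObjectTagSketch;
    }
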
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index c098acdd7..a8cb777d7 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -28,7 +28,8 @@
#ifndef V8_ARM_CODEGEN_ARM_H_
#define V8_ARM_CODEGEN_ARM_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Forward declarations
class DeferredCode;
@@ -193,8 +194,7 @@ class CodeGenerator: public AstVisitor {
// Accessors
Scope* scope() const { return scope_; }
- // Clearing and generating deferred code.
- void ClearDeferred();
+ // Generating deferred code.
void ProcessDeferred();
bool is_eval() { return is_eval_; }
@@ -205,6 +205,8 @@ class CodeGenerator: public AstVisitor {
JumpTarget* true_target() const { return state_->true_target(); }
JumpTarget* false_target() const { return state_->false_target(); }
+ // We don't track loop nesting level on ARM yet.
+ int loop_nesting() const { return 0; }
// Node visitors.
void VisitStatements(ZoneList<Statement*>* statements);
@@ -317,8 +319,7 @@ class CodeGenerator: public AstVisitor {
Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
void ProcessDeclarations(ZoneList<Declaration*>* declarations);
- Handle<Code> ComputeCallInitialize(int argc);
- Handle<Code> ComputeCallInitializeInLoop(int argc);
+ Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
// Declare global variables and functions in the given array of
// name/value pairs.
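
The header change folds the old ComputeCallInitialize/ComputeCallInitializeInLoop pair into one function taking an InLoopFlag, and every ARM call site in the codegen diff above now derives the flag the same way. A sketch of that call-site idiom, using only the enum names shown in the patch (value ordering is assumed); since loop_nesting() is hard-wired to 0 on ARM for now, the flag is always NOT_IN_LOOP here:

    enum InLoopFlag { NOT_IN_LOOP, IN_LOOP };  // names from the diff; ordering assumed

    static InLoopFlag FlagForCallSite(int loop_nesting) {
      return loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
    }
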
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index 66c6a8d86..99eab238c 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -28,7 +28,8 @@
#ifndef V8_ARM_CONSTANTS_ARM_H_
#define V8_ARM_CONSTANTS_ARM_H_
-namespace assembler { namespace arm {
+namespace assembler {
+namespace arm {
// Defines constants and accessor classes to assemble, disassemble and
// simulate ARM instructions.
diff --git a/deps/v8/src/arm/cpu-arm.cc b/deps/v8/src/arm/cpu-arm.cc
index 736966129..71da1ecc9 100644
--- a/deps/v8/src/arm/cpu-arm.cc
+++ b/deps/v8/src/arm/cpu-arm.cc
@@ -34,7 +34,8 @@
#include "cpu.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
void CPU::Setup() {
// Nothing to do.
diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc
index f86f981cb..bcfab6c80 100644
--- a/deps/v8/src/arm/debug-arm.cc
+++ b/deps/v8/src/arm/debug-arm.cc
@@ -30,7 +30,8 @@
#include "codegen-inl.h"
#include "debug.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Currently debug break is not supported in frame exit code on ARM.
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 3b7474dba..f56a599f8 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -62,7 +62,8 @@
#include "platform.h"
-namespace assembler { namespace arm {
+namespace assembler {
+namespace arm {
namespace v8i = v8::internal;
diff --git a/deps/v8/src/arm/frames-arm.cc b/deps/v8/src/arm/frames-arm.cc
index d26198ae1..6fde4b73c 100644
--- a/deps/v8/src/arm/frames-arm.cc
+++ b/deps/v8/src/arm/frames-arm.cc
@@ -31,7 +31,8 @@
#include "arm/assembler-arm-inl.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
StackFrame::Type StackFrame::ComputeType(State* state) {
diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h
index 9a18f3d93..a67b18a2b 100644
--- a/deps/v8/src/arm/frames-arm.h
+++ b/deps/v8/src/arm/frames-arm.h
@@ -28,7 +28,8 @@
#ifndef V8_ARM_FRAMES_ARM_H_
#define V8_ARM_FRAMES_ARM_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// The ARM ABI does not specify the usage of register r9, which may be reserved
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index b07c4742d..9b45c46a8 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -32,7 +32,8 @@
#include "runtime.h"
#include "stub-cache.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// ----------------------------------------------------------------------------
@@ -211,7 +212,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Probe the stub cache.
Code::Flags flags =
- Code::ComputeFlags(Code::CALL_IC, MONOMORPHIC, NORMAL, argc);
+ Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
StubCache::GenerateProbe(masm, flags, r1, r2, r3);
// If the stub cache probing failed, the receiver might be a value.
@@ -422,7 +423,9 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
__ ldr(r0, MemOperand(sp, 0));
// Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
+ Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
+ NOT_IN_LOOP,
+ MONOMORPHIC);
StubCache::GenerateProbe(masm, flags, r0, r2, r3);
// Cache miss: Jump to runtime.
@@ -755,7 +758,9 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// Get the receiver from the stack and probe the stub cache.
__ ldr(r1, MemOperand(sp));
- Code::Flags flags = Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC);
+ Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
+ NOT_IN_LOOP,
+ MONOMORPHIC);
StubCache::GenerateProbe(masm, flags, r1, r2, r3);
// Cache miss: Jump to runtime.
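
Each IC above now states its in-loop flag when computing code flags, so a stub compiled for a loop body can be told apart from its out-of-loop twin. An illustrative-only packing, with entirely assumed bit positions, just to show why a stub-cache probe must mask off the bits that do not participate in lookup (compare the kFlagsNotUsedInLookup change in stub-cache-arm.cc below):

    typedef unsigned FlagsSketch;

    static FlagsSketch PackFlags(unsigned kind, unsigned ic_state, bool in_loop) {
      return kind | (ic_state << 4) | ((in_loop ? 1u : 0u) << 7);  // assumed layout
    }

    static const FlagsSketch kNotUsedInLookupSketch = 1u << 7;  // assumed mask

    static bool ProbeMatches(FlagsSketch stored, FlagsSketch wanted) {
      return (stored & ~kNotUsedInLookupSketch) == wanted;
    }
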
diff --git a/deps/v8/src/arm/jump-target-arm.cc b/deps/v8/src/arm/jump-target-arm.cc
index 6d375e5ce..65e7eafa6 100644
--- a/deps/v8/src/arm/jump-target-arm.cc
+++ b/deps/v8/src/arm/jump-target-arm.cc
@@ -28,46 +28,47 @@
#include "v8.h"
#include "codegen-inl.h"
+#include "jump-target-inl.h"
#include "register-allocator-inl.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// -------------------------------------------------------------------------
// JumpTarget implementation.
-#define __ ACCESS_MASM(masm_)
+#define __ ACCESS_MASM(cgen()->masm())
void JumpTarget::DoJump() {
- ASSERT(cgen_ != NULL);
- ASSERT(cgen_->has_valid_frame());
+ ASSERT(cgen()->has_valid_frame());
// Live non-frame registers are not allowed at unconditional jumps
// because we have no way of invalidating the corresponding results
// which are still live in the C++ code.
- ASSERT(cgen_->HasValidEntryRegisters());
+ ASSERT(cgen()->HasValidEntryRegisters());
if (is_bound()) {
// Backward jump. There is an expected frame to merge to.
ASSERT(direction_ == BIDIRECTIONAL);
- cgen_->frame()->MergeTo(entry_frame_);
- cgen_->DeleteFrame();
+ cgen()->frame()->PrepareMergeTo(entry_frame_);
+ cgen()->frame()->MergeTo(entry_frame_);
+ cgen()->DeleteFrame();
__ jmp(&entry_label_);
} else {
+ // Preconfigured entry frame is not used on ARM.
+ ASSERT(entry_frame_ == NULL);
// Forward jump. The current frame is added to the end of the list
// of frames reaching the target block and a jump to the merge code
// is emitted.
- AddReachingFrame(cgen_->frame());
+ AddReachingFrame(cgen()->frame());
RegisterFile empty;
- cgen_->SetFrame(NULL, &empty);
+ cgen()->SetFrame(NULL, &empty);
__ jmp(&merge_labels_.last());
}
-
- is_linked_ = !is_bound_;
}
void JumpTarget::DoBranch(Condition cc, Hint ignored) {
- ASSERT(cgen_ != NULL);
- ASSERT(cgen_->has_valid_frame());
+ ASSERT(cgen()->has_valid_frame());
if (is_bound()) {
ASSERT(direction_ == BIDIRECTIONAL);
@@ -77,29 +78,29 @@ void JumpTarget::DoBranch(Condition cc, Hint ignored) {
// Swap the current frame for a copy (we do the swapping to get
// the off-frame registers off the fall through) to use for the
// branch.
- VirtualFrame* fall_through_frame = cgen_->frame();
+ VirtualFrame* fall_through_frame = cgen()->frame();
VirtualFrame* branch_frame = new VirtualFrame(fall_through_frame);
- RegisterFile non_frame_registers = RegisterAllocator::Reserved();
- cgen_->SetFrame(branch_frame, &non_frame_registers);
+ RegisterFile non_frame_registers;
+ cgen()->SetFrame(branch_frame, &non_frame_registers);
// Check if we can avoid merge code.
- cgen_->frame()->PrepareMergeTo(entry_frame_);
- if (cgen_->frame()->Equals(entry_frame_)) {
+ cgen()->frame()->PrepareMergeTo(entry_frame_);
+ if (cgen()->frame()->Equals(entry_frame_)) {
// Branch right in to the block.
- cgen_->DeleteFrame();
+ cgen()->DeleteFrame();
__ b(cc, &entry_label_);
- cgen_->SetFrame(fall_through_frame, &non_frame_registers);
+ cgen()->SetFrame(fall_through_frame, &non_frame_registers);
return;
}
// Check if we can reuse existing merge code.
for (int i = 0; i < reaching_frames_.length(); i++) {
if (reaching_frames_[i] != NULL &&
- cgen_->frame()->Equals(reaching_frames_[i])) {
+ cgen()->frame()->Equals(reaching_frames_[i])) {
// Branch to the merge code.
- cgen_->DeleteFrame();
+ cgen()->DeleteFrame();
__ b(cc, &merge_labels_[i]);
- cgen_->SetFrame(fall_through_frame, &non_frame_registers);
+ cgen()->SetFrame(fall_through_frame, &non_frame_registers);
return;
}
}
@@ -108,19 +109,20 @@ void JumpTarget::DoBranch(Condition cc, Hint ignored) {
// around the merge code on the fall through path.
Label original_fall_through;
__ b(NegateCondition(cc), &original_fall_through);
- cgen_->frame()->MergeTo(entry_frame_);
- cgen_->DeleteFrame();
+ cgen()->frame()->MergeTo(entry_frame_);
+ cgen()->DeleteFrame();
__ b(&entry_label_);
- cgen_->SetFrame(fall_through_frame, &non_frame_registers);
+ cgen()->SetFrame(fall_through_frame, &non_frame_registers);
__ bind(&original_fall_through);
} else {
+ // Preconfigured entry frame is not used on ARM.
+ ASSERT(entry_frame_ == NULL);
// Forward branch. A copy of the current frame is added to the end
// of the list of frames reaching the target block and a branch to
// the merge code is emitted.
- AddReachingFrame(new VirtualFrame(cgen_->frame()));
+ AddReachingFrame(new VirtualFrame(cgen()->frame()));
__ b(cc, &merge_labels_.last());
- is_linked_ = true;
}
}
@@ -132,70 +134,63 @@ void JumpTarget::Call() {
// at the label (which should be the only one) is the spilled current
// frame plus an in-memory return address. The "fall-through" frame
// at the return site is the spilled current frame.
- ASSERT(cgen_ != NULL);
- ASSERT(cgen_->has_valid_frame());
+ ASSERT(cgen()->has_valid_frame());
// There are no non-frame references across the call.
- ASSERT(cgen_->HasValidEntryRegisters());
+ ASSERT(cgen()->HasValidEntryRegisters());
ASSERT(!is_linked());
- cgen_->frame()->SpillAll();
- VirtualFrame* target_frame = new VirtualFrame(cgen_->frame());
+ cgen()->frame()->SpillAll();
+ VirtualFrame* target_frame = new VirtualFrame(cgen()->frame());
target_frame->Adjust(1);
+ // We do not expect a call with a preconfigured entry frame.
+ ASSERT(entry_frame_ == NULL);
AddReachingFrame(target_frame);
__ bl(&merge_labels_.last());
-
- is_linked_ = !is_bound_;
}
void JumpTarget::DoBind(int mergable_elements) {
- ASSERT(cgen_ != NULL);
ASSERT(!is_bound());
// Live non-frame registers are not allowed at the start of a basic
// block.
- ASSERT(!cgen_->has_valid_frame() || cgen_->HasValidEntryRegisters());
+ ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
if (direction_ == FORWARD_ONLY) {
// A simple case: no forward jumps and no possible backward jumps.
if (!is_linked()) {
// The stack pointer can be floating above the top of the
// virtual frame before the bind. Afterward, it should not.
- ASSERT(cgen_->has_valid_frame());
- VirtualFrame* frame = cgen_->frame();
- int difference =
- frame->stack_pointer_ - (frame->elements_.length() - 1);
+ ASSERT(cgen()->has_valid_frame());
+ VirtualFrame* frame = cgen()->frame();
+ int difference = frame->stack_pointer_ - (frame->element_count() - 1);
if (difference > 0) {
frame->stack_pointer_ -= difference;
__ add(sp, sp, Operand(difference * kPointerSize));
}
-
- is_bound_ = true;
+ __ bind(&entry_label_);
return;
}
// Another simple case: no fall through, a single forward jump,
// and no possible backward jumps.
- if (!cgen_->has_valid_frame() && reaching_frames_.length() == 1) {
+ if (!cgen()->has_valid_frame() && reaching_frames_.length() == 1) {
// Pick up the only reaching frame, take ownership of it, and
// use it for the block about to be emitted.
VirtualFrame* frame = reaching_frames_[0];
- RegisterFile reserved = RegisterAllocator::Reserved();
- cgen_->SetFrame(frame, &reserved);
+ RegisterFile empty;
+ cgen()->SetFrame(frame, &empty);
reaching_frames_[0] = NULL;
__ bind(&merge_labels_[0]);
// The stack pointer can be floating above the top of the
// virtual frame before the bind. Afterward, it should not.
- int difference =
- frame->stack_pointer_ - (frame->elements_.length() - 1);
+ int difference = frame->stack_pointer_ - (frame->element_count() - 1);
if (difference > 0) {
frame->stack_pointer_ -= difference;
__ add(sp, sp, Operand(difference * kPointerSize));
}
-
- is_linked_ = false;
- is_bound_ = true;
+ __ bind(&entry_label_);
return;
}
}
@@ -203,15 +198,17 @@ void JumpTarget::DoBind(int mergable_elements) {
// If there is a current frame, record it as the fall-through. It
// is owned by the reaching frames for now.
bool had_fall_through = false;
- if (cgen_->has_valid_frame()) {
+ if (cgen()->has_valid_frame()) {
had_fall_through = true;
- AddReachingFrame(cgen_->frame());
+ AddReachingFrame(cgen()->frame()); // Return value ignored.
RegisterFile empty;
- cgen_->SetFrame(NULL, &empty);
+ cgen()->SetFrame(NULL, &empty);
}
// Compute the frame to use for entry to the block.
- ComputeEntryFrame(mergable_elements);
+ if (entry_frame_ == NULL) {
+ ComputeEntryFrame(mergable_elements);
+ }
// Some moves required to merge to an expected frame require purely
// frame state changes, and do not require any code generation.
@@ -242,17 +239,17 @@ void JumpTarget::DoBind(int mergable_elements) {
// binding site or as the fall through from a previous merge
// code block. Jump around the code we are about to
// generate.
- if (cgen_->has_valid_frame()) {
- cgen_->DeleteFrame();
+ if (cgen()->has_valid_frame()) {
+ cgen()->DeleteFrame();
__ b(&entry_label_);
}
// Pick up the frame for this block. Assume ownership if
// there cannot be backward jumps.
- RegisterFile reserved = RegisterAllocator::Reserved();
+ RegisterFile empty;
if (direction_ == BIDIRECTIONAL) {
- cgen_->SetFrame(new VirtualFrame(frame), &reserved);
+ cgen()->SetFrame(new VirtualFrame(frame), &empty);
} else {
- cgen_->SetFrame(frame, &reserved);
+ cgen()->SetFrame(frame, &empty);
reaching_frames_[i] = NULL;
}
__ bind(&merge_labels_[i]);
@@ -261,23 +258,22 @@ void JumpTarget::DoBind(int mergable_elements) {
// looking for any that can share merge code with this one.
for (int j = 0; j < i; j++) {
VirtualFrame* other = reaching_frames_[j];
- if (other != NULL && other->Equals(cgen_->frame())) {
+ if (other != NULL && other->Equals(cgen()->frame())) {
// Set the reaching frame element to null to avoid
// processing it later, and then bind its entry label.
- delete other;
reaching_frames_[j] = NULL;
__ bind(&merge_labels_[j]);
}
}
// Emit the merge code.
- cgen_->frame()->MergeTo(entry_frame_);
+ cgen()->frame()->MergeTo(entry_frame_);
} else if (i == reaching_frames_.length() - 1 && had_fall_through) {
// If this is the fall through, and it didn't need merge
// code, we need to pick up the frame so we can jump around
// subsequent merge blocks if necessary.
- RegisterFile reserved = RegisterAllocator::Reserved();
- cgen_->SetFrame(frame, &reserved);
+ RegisterFile empty;
+ cgen()->SetFrame(frame, &empty);
reaching_frames_[i] = NULL;
}
}
@@ -286,22 +282,17 @@ void JumpTarget::DoBind(int mergable_elements) {
// The code generator may not have a current frame if there was no
// fall through and none of the reaching frames needed merging.
// In that case, clone the entry frame as the current frame.
- if (!cgen_->has_valid_frame()) {
- RegisterFile reserved_registers = RegisterAllocator::Reserved();
- cgen_->SetFrame(new VirtualFrame(entry_frame_), &reserved_registers);
+ if (!cgen()->has_valid_frame()) {
+ RegisterFile empty;
+ cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
}
- // There is certainly a current frame equal to the entry frame.
- // Bind the entry frame label.
- __ bind(&entry_label_);
-
// There may be unprocessed reaching frames that did not need
// merge code. They will have unbound merge labels. Bind their
// merge labels to be the same as the entry label and deallocate
// them.
for (int i = 0; i < reaching_frames_.length(); i++) {
if (!merge_labels_[i].is_bound()) {
- delete reaching_frames_[i];
reaching_frames_[i] = NULL;
__ bind(&merge_labels_[i]);
}
@@ -318,15 +309,13 @@ void JumpTarget::DoBind(int mergable_elements) {
// Use a copy of the reaching frame so the original can be saved
// for possible reuse as a backward merge block.
- RegisterFile reserved = RegisterAllocator::Reserved();
- cgen_->SetFrame(new VirtualFrame(reaching_frames_[0]), &reserved);
+ RegisterFile empty;
+ cgen()->SetFrame(new VirtualFrame(reaching_frames_[0]), &empty);
__ bind(&merge_labels_[0]);
- cgen_->frame()->MergeTo(entry_frame_);
- __ bind(&entry_label_);
+ cgen()->frame()->MergeTo(entry_frame_);
}
- is_linked_ = false;
- is_bound_ = true;
+ __ bind(&entry_label_);
}
#undef __
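
Both fast paths in DoBind above make the same adjustment: before binding, the hardware stack pointer may float above the top of the virtual frame, and the surplus is popped with a single add to sp. A compact model of that bookkeeping (function name is ours, illustrative only):

    // 'difference' words must be popped (one add to sp) before binding when
    // the hardware stack holds elements beyond the virtual frame's top.
    static int WordsToPopBeforeBind(int stack_pointer_index, int element_count) {
      int difference = stack_pointer_index - (element_count - 1);
      return difference > 0 ? difference : 0;
    }
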
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 365c1ad7f..4e24063c9 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -32,7 +32,8 @@
#include "debug.h"
#include "runtime.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Give alias names to registers
Register cp = { 8 }; // JavaScript context pointer
@@ -58,7 +59,10 @@ MacroAssembler::MacroAssembler(void* buffer, int size)
// We do not support thumb inter-working with an arm architecture not supporting
// the blx instruction (below v5t)
#if defined(__THUMB_INTERWORK__)
-#if !defined(__ARM_ARCH_5T__) && !defined(__ARM_ARCH_5TE__)
+#if !defined(__ARM_ARCH_5T__) && \
+ !defined(__ARM_ARCH_5TE__) && \
+ !defined(__ARM_ARCH_7A__) && \
+ !defined(__ARM_ARCH_7__)
// add tests for other versions above v5t as required
#error "for thumb inter-working we require architecture v5t or above"
#endif
@@ -291,6 +295,12 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
+
+ // Compute the argv pointer and keep it in a callee-saved register.
+ // r0 is argc.
+ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
+ sub(r6, r6, Operand(kPointerSize));
+
// Compute parameter pointer before making changes and save it as ip
// register so that it is restored as sp register on exit, thereby
// popping the args.
@@ -298,6 +308,17 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
// ip = sp + kPointerSize * #args;
add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));
+ // Align the stack at this point. After this point we have 5 pushes,
+ // so in fact we have to unalign here! See also the assert on the
+ // alignment immediately below.
+ if (OS::ActivationFrameAlignment() != kPointerSize) {
+ // This code needs to be made more general if this assert doesn't hold.
+ ASSERT(OS::ActivationFrameAlignment() == 2 * kPointerSize);
+ mov(r7, Operand(Smi::FromInt(0)));
+ tst(sp, Operand(OS::ActivationFrameAlignment() - 1));
+ push(r7, eq); // Conditional push instruction.
+ }
+
// Push in reverse order: caller_fp, sp_on_exit, and caller_pc.
stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
mov(fp, Operand(sp)); // setup new frame pointer
@@ -316,9 +337,6 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
mov(r4, Operand(r0));
mov(r5, Operand(r1));
- // Compute the argv pointer and keep it in a callee-saved register.
- add(r6, fp, Operand(r4, LSL, kPointerSizeLog2));
- add(r6, r6, Operand(ExitFrameConstants::kPPDisplacement - kPointerSize));
#ifdef ENABLE_DEBUGGER_SUPPORT
// Save the state of all registers to the stack from the memory
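
The alignment logic added to EnterExitFrame is easy to misread: five pointer-sized pushes still follow, and five 4-byte pushes flip 8-byte parity, so the filler word is pushed exactly when sp is currently aligned. A sketch under the assumptions the code itself asserts (8-byte ABI frame alignment, 4-byte pointers):

    #include <stdint.h>

    // True exactly when sp is already aligned: the filler push then unaligns
    // it, and the five pushes that follow re-align it.
    static bool NeedsAlignmentFiller(uintptr_t sp, uintptr_t alignment,
                                     uintptr_t pointer_size) {
      if (alignment == pointer_size) return false;  // nothing to do
      return (sp & (alignment - 1)) == 0;           // tst(sp, alignment - 1), eq
    }
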
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index e336757e0..27eeab2e9 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -30,7 +30,8 @@
#include "assembler.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Give alias names to registers
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
index bf07f0e3d..78ebc7e80 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
@@ -30,7 +30,8 @@
#include "regexp-macro-assembler.h"
#include "arm/regexp-macro-assembler-arm.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
RegExpMacroAssemblerARM::RegExpMacroAssemblerARM() {
UNIMPLEMENTED();
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/arm/regexp-macro-assembler-arm.h
index 2f38bb73e..de5518379 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.h
@@ -28,7 +28,8 @@
#ifndef V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
#define V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
class RegExpMacroAssemblerARM: public RegExpMacroAssembler {
public:
diff --git a/deps/v8/src/arm/register-allocator-arm-inl.h b/deps/v8/src/arm/register-allocator-arm-inl.h
new file mode 100644
index 000000000..d98818f0f
--- /dev/null
+++ b/deps/v8/src/arm/register-allocator-arm-inl.h
@@ -0,0 +1,103 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_
+#define V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_
+
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+bool RegisterAllocator::IsReserved(Register reg) {
+ return reg.is(cp) || reg.is(fp) || reg.is(sp) || reg.is(pc);
+}
+
+
+
+// The register allocator uses small integers to represent the
+// non-reserved assembler registers. The mapping is:
+//
+// r0 <-> 0
+// r1 <-> 1
+// r2 <-> 2
+// r3 <-> 3
+// r4 <-> 4
+// r5 <-> 5
+// r6 <-> 6
+// r7 <-> 7
+// r9 <-> 8
+// r10 <-> 9
+// ip <-> 10
+// lr <-> 11
+
+int RegisterAllocator::ToNumber(Register reg) {
+ ASSERT(reg.is_valid() && !IsReserved(reg));
+ static int numbers[] = {
+ 0, // r0
+ 1, // r1
+ 2, // r2
+ 3, // r3
+ 4, // r4
+ 5, // r5
+ 6, // r6
+ 7, // r7
+ -1, // cp
+ 8, // r9
+ 9, // r10
+ -1, // fp
+ 10, // ip
+ -1, // sp
+ 11, // lr
+ -1 // pc
+ };
+ return numbers[reg.code()];
+}
+
+
+Register RegisterAllocator::ToRegister(int num) {
+ ASSERT(num >= 0 && num < kNumRegisters);
+ static Register registers[] =
+ { r0, r1, r2, r3, r4, r5, r6, r7, r9, r10, ip, lr };
+ return registers[num];
+}
+
+
+void RegisterAllocator::Initialize() {
+ Reset();
+ // The non-reserved r1 and lr registers are live on JS function entry.
+ Use(r1); // JS function.
+ Use(lr); // Return address.
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_
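
The numbering table above must be kept in sync with ToRegister and IsReserved by hand. A debug-build consistency check one could write, using only names that appear in this patch (the kNumRegisters access path through RegisterAllocator is assumed from the ASSERT in ToRegister):

    static void CheckRegisterNumberingRoundTrip() {
      for (int n = 0; n < RegisterAllocator::kNumRegisters; n++) {
        Register reg = RegisterAllocator::ToRegister(n);
        ASSERT(!RegisterAllocator::IsReserved(reg));   // reserved regs excluded
        ASSERT(RegisterAllocator::ToNumber(reg) == n); // round trip holds
      }
    }
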
diff --git a/deps/v8/src/arm/register-allocator-arm.cc b/deps/v8/src/arm/register-allocator-arm.cc
index d468c84e3..ad0c7f9d4 100644
--- a/deps/v8/src/arm/register-allocator-arm.cc
+++ b/deps/v8/src/arm/register-allocator-arm.cc
@@ -30,7 +30,8 @@
#include "codegen-inl.h"
#include "register-allocator-inl.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// -------------------------------------------------------------------------
// Result implementation.
@@ -48,56 +49,10 @@ void Result::ToRegister(Register target) {
// -------------------------------------------------------------------------
// RegisterAllocator implementation.
-RegisterFile RegisterAllocator::Reserved() {
- RegisterFile reserved;
- reserved.Use(sp);
- reserved.Use(fp);
- reserved.Use(cp);
- reserved.Use(pc);
- return reserved;
-}
-
-
-void RegisterAllocator::UnuseReserved(RegisterFile* register_file) {
- register_file->ref_counts_[sp.code()] = 0;
- register_file->ref_counts_[fp.code()] = 0;
- register_file->ref_counts_[cp.code()] = 0;
- register_file->ref_counts_[pc.code()] = 0;
-}
-
-
-bool RegisterAllocator::IsReserved(int reg_code) {
- return (reg_code == sp.code())
- || (reg_code == fp.code())
- || (reg_code == cp.code())
- || (reg_code == pc.code());
-}
-
-
-void RegisterAllocator::Initialize() {
- Reset();
- // The following registers are live on function entry, saved in the
- // frame, and available for allocation during execution.
- Use(r1); // JS function.
- Use(lr); // Return address.
-}
-
-
-void RegisterAllocator::Reset() {
- registers_.Reset();
- // The following registers are live on function entry and reserved
- // during execution.
- Use(sp); // Stack pointer.
- Use(fp); // Frame pointer (caller's frame pointer on entry).
- Use(cp); // Context context (callee's context on entry).
- Use(pc); // Program counter.
-}
-
-
Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
- UNIMPLEMENTED();
- Result invalid(cgen_);
- return invalid;
+ // No byte registers on ARM.
+ UNREACHABLE();
+ return Result();
}
diff --git a/deps/v8/src/arm/register-allocator-arm.h b/deps/v8/src/arm/register-allocator-arm.h
new file mode 100644
index 000000000..f953ed9f1
--- /dev/null
+++ b/deps/v8/src/arm/register-allocator-arm.h
@@ -0,0 +1,43 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_REGISTER_ALLOCATOR_ARM_H_
+#define V8_ARM_REGISTER_ALLOCATOR_ARM_H_
+
+namespace v8 {
+namespace internal {
+
+class RegisterAllocatorConstants : public AllStatic {
+ public:
+ static const int kNumRegisters = 12;
+ static const int kInvalidRegister = -1;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_ARM_REGISTER_ALLOCATOR_ARM_H_
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 9737e9539..b8b66636c 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -36,7 +36,8 @@
#if !defined(__arm__)
// Only build the simulator if not compiling for real ARM hardware.
-namespace assembler { namespace arm {
+namespace assembler {
+namespace arm {
using ::v8::internal::Object;
using ::v8::internal::PrintF;
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index 2029fd3bc..d4a395aca 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -66,7 +66,8 @@
#include "constants-arm.h"
-namespace assembler { namespace arm {
+namespace assembler {
+namespace arm {
class Simulator {
public:
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 56afa0288..c09f9e3b6 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -31,7 +31,8 @@
#include "codegen-inl.h"
#include "stub-cache.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
#define __ ACCESS_MASM(masm)
@@ -61,7 +62,7 @@ static void ProbeTable(MacroAssembler* masm,
// Check that the flags match what we're looking for.
__ ldr(offset, FieldMemOperand(offset, Code::kFlagsOffset));
- __ and_(offset, offset, Operand(~Code::kFlagsTypeMask));
+ __ and_(offset, offset, Operand(~Code::kFlagsNotUsedInLookup));
__ cmp(offset, Operand(flags));
__ b(ne, &miss);
@@ -245,6 +246,7 @@ void StubCompiler::GenerateLoadCallback(MacroAssembler* masm,
void StubCompiler::GenerateLoadInterceptor(MacroAssembler* masm,
JSObject* object,
JSObject* holder,
+ Smi* lookup_hint,
Register receiver,
Register name,
Register scratch1,
@@ -262,11 +264,13 @@ void StubCompiler::GenerateLoadInterceptor(MacroAssembler* masm,
__ push(receiver); // receiver
__ push(reg); // holder
__ push(name); // name
+ __ mov(scratch1, Operand(lookup_hint));
+ __ push(scratch1);
// Do tail-call to the runtime system.
ExternalReference load_ic_property =
ExternalReference(IC_Utility(IC::kLoadInterceptorProperty));
- __ TailCallRuntime(load_ic_property, 3);
+ __ TailCallRuntime(load_ic_property, 4);
}
@@ -494,7 +498,9 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
Object* CallStubCompiler::CompileCallField(Object* object,
JSObject* holder,
int index,
- String* name) {
+ String* name,
+ Code::Flags flags) {
+ ASSERT_EQ(FIELD, Code::ExtractTypeFromFlags(flags));
// ----------- S t a t e -------------
// -- lr: return address
// -----------------------------------
@@ -538,14 +544,16 @@ Object* CallStubCompiler::CompileCallField(Object* object,
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(FIELD, name);
+ return GetCodeWithFlags(flags, name);
}
Object* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,
- CheckType check) {
+ CheckType check,
+ Code::Flags flags) {
+ ASSERT_EQ(CONSTANT_FUNCTION, Code::ExtractTypeFromFlags(flags));
// ----------- S t a t e -------------
// -- lr: return address
// -----------------------------------
@@ -663,7 +671,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
if (function->shared()->name()->IsString()) {
function_name = String::cast(function->shared()->name());
}
- return GetCode(CONSTANT_FUNCTION, function_name);
+ return GetCodeWithFlags(flags, function_name);
}
@@ -904,7 +912,15 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
__ ldr(r0, MemOperand(sp, 0));
- GenerateLoadInterceptor(masm(), object, holder, r0, r2, r3, r1, &miss);
+ GenerateLoadInterceptor(masm(),
+ object,
+ holder,
+ holder->InterceptorPropertyLookupHint(name),
+ r0,
+ r2,
+ r3,
+ r1,
+ &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1010,7 +1026,15 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
__ cmp(r2, Operand(Handle<String>(name)));
__ b(ne, &miss);
- GenerateLoadInterceptor(masm(), receiver, holder, r0, r2, r3, r1, &miss);
+ GenerateLoadInterceptor(masm(),
+ receiver,
+ holder,
+ Smi::FromInt(JSObject::kLookupInHolder),
+ r0,
+ r2,
+ r3,
+ r1,
+ &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
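
GenerateLoadInterceptor now pushes a fourth argument, the Smi-encoded lookup hint, and the tail call's argument count changes from 3 to 4 to match. A sketch of the argument block the runtime helper sees (struct and field names are ours; only the push order, receiver, holder, name, then the hint on top, comes from the code above):

    struct InterceptorArgsSketch {
      void* receiver;
      void* holder;
      void* name;
      void* smi_lookup_hint;  // e.g. Smi::FromInt(JSObject::kLookupInHolder)
    };
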
diff --git a/deps/v8/src/arm/virtual-frame-arm.cc b/deps/v8/src/arm/virtual-frame-arm.cc
index 43100f1ec..952738329 100644
--- a/deps/v8/src/arm/virtual-frame-arm.cc
+++ b/deps/v8/src/arm/virtual-frame-arm.cc
@@ -31,31 +31,25 @@
#include "register-allocator-inl.h"
#include "scopes.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// -------------------------------------------------------------------------
// VirtualFrame implementation.
-#define __ ACCESS_MASM(masm_)
+#define __ ACCESS_MASM(masm())
// On entry to a function, the virtual frame already contains the
// receiver and the parameters. All initial frame elements are in
// memory.
-VirtualFrame::VirtualFrame(CodeGenerator* cgen)
- : cgen_(cgen),
- masm_(cgen->masm()),
- elements_(cgen->scope()->num_parameters()
- + cgen->scope()->num_stack_slots()
- + kPreallocatedElements),
- parameter_count_(cgen->scope()->num_parameters()),
- local_count_(0),
- stack_pointer_(parameter_count_), // 0-based index of TOS.
- frame_pointer_(kIllegalIndex) {
- for (int i = 0; i < parameter_count_ + 1; i++) {
+VirtualFrame::VirtualFrame()
+ : elements_(parameter_count() + local_count() + kPreallocatedElements),
+ stack_pointer_(parameter_count()) { // 0-based index of TOS.
+ for (int i = 0; i <= stack_pointer_; i++) {
elements_.Add(FrameElement::MemoryElement());
}
- for (int i = 0; i < kNumRegisters; i++) {
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
register_locations_[i] = kIllegalIndex;
}
}
@@ -82,10 +76,10 @@ void VirtualFrame::SyncRange(int begin, int end) {
void VirtualFrame::MergeTo(VirtualFrame* expected) {
- Comment cmnt(masm_, "[ Merge frame");
+ Comment cmnt(masm(), "[ Merge frame");
// We should always be merging the code generator's current frame to an
// expected frame.
- ASSERT(cgen_->frame() == this);
+ ASSERT(cgen()->frame() == this);
// Adjust the stack pointer upward (toward the top of the virtual
// frame) if necessary.
@@ -102,7 +96,7 @@ void VirtualFrame::MergeTo(VirtualFrame* expected) {
// Fix any sync bit problems from the bottom-up, stopping when we
// hit the stack pointer or the top of the frame if the stack
// pointer is floating above the frame.
- int limit = Min(stack_pointer_, elements_.length() - 1);
+ int limit = Min(static_cast<int>(stack_pointer_), element_count() - 1);
for (int i = 0; i <= limit; i++) {
FrameElement source = elements_[i];
FrameElement target = expected->elements_[i];
@@ -134,7 +128,7 @@ void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
// On ARM, all elements are in memory.
#ifdef DEBUG
- int start = Min(stack_pointer_, elements_.length() - 1);
+ int start = Min(static_cast<int>(stack_pointer_), element_count() - 1);
for (int i = start; i >= 0; i--) {
ASSERT(elements_[i].is_memory());
ASSERT(expected->elements_[i].is_memory());
@@ -147,12 +141,12 @@ void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
}
-void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame *expected) {
+void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame* expected) {
}
void VirtualFrame::Enter() {
- Comment cmnt(masm_, "[ Enter JS frame");
+ Comment cmnt(masm(), "[ Enter JS frame");
#ifdef DEBUG
// Verify that r1 contains a JS function. The following code relies
@@ -175,15 +169,14 @@ void VirtualFrame::Enter() {
Adjust(4);
__ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
// Adjust FP to point to saved FP.
- frame_pointer_ = elements_.length() - 2;
__ add(fp, sp, Operand(2 * kPointerSize));
- cgen_->allocator()->Unuse(r1);
- cgen_->allocator()->Unuse(lr);
+ cgen()->allocator()->Unuse(r1);
+ cgen()->allocator()->Unuse(lr);
}
void VirtualFrame::Exit() {
- Comment cmnt(masm_, "[ Exit JS frame");
+ Comment cmnt(masm(), "[ Exit JS frame");
// Drop the execution stack down to the frame pointer and restore the caller
// frame pointer and return address.
__ mov(sp, fp);
@@ -191,12 +184,11 @@ void VirtualFrame::Exit() {
}
-void VirtualFrame::AllocateStackSlots(int count) {
- ASSERT(height() == 0);
- local_count_ = count;
- Adjust(count);
+void VirtualFrame::AllocateStackSlots() {
+ int count = local_count();
if (count > 0) {
- Comment cmnt(masm_, "[ Allocate space for locals");
+ Comment cmnt(masm(), "[ Allocate space for locals");
+ Adjust(count);
// Initialize stack slots with 'undefined' value.
__ mov(ip, Operand(Factory::undefined_value()));
for (int i = 0; i < count; i++) {
@@ -246,9 +238,9 @@ void VirtualFrame::PushTryHandler(HandlerType type) {
Result VirtualFrame::RawCallStub(CodeStub* stub) {
- ASSERT(cgen_->HasValidEntryRegisters());
+ ASSERT(cgen()->HasValidEntryRegisters());
__ CallStub(stub);
- Result result = cgen_->allocator()->Allocate(r0);
+ Result result = cgen()->allocator()->Allocate(r0);
ASSERT(result.is_valid());
return result;
}
@@ -271,9 +263,9 @@ Result VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
Result VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
PrepareForCall(arg_count, arg_count);
- ASSERT(cgen_->HasValidEntryRegisters());
+ ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(f, arg_count);
- Result result = cgen_->allocator()->Allocate(r0);
+ Result result = cgen()->allocator()->Allocate(r0);
ASSERT(result.is_valid());
return result;
}
@@ -281,9 +273,9 @@ Result VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
Result VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
PrepareForCall(arg_count, arg_count);
- ASSERT(cgen_->HasValidEntryRegisters());
+ ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(id, arg_count);
- Result result = cgen_->allocator()->Allocate(r0);
+ Result result = cgen()->allocator()->Allocate(r0);
ASSERT(result.is_valid());
return result;
}
@@ -297,16 +289,16 @@ Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
PrepareForCall(arg_count, arg_count);
arg_count_register->Unuse();
__ InvokeBuiltin(id, flags);
- Result result = cgen_->allocator()->Allocate(r0);
+ Result result = cgen()->allocator()->Allocate(r0);
return result;
}
Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
RelocInfo::Mode rmode) {
- ASSERT(cgen_->HasValidEntryRegisters());
+ ASSERT(cgen()->HasValidEntryRegisters());
__ Call(code, rmode);
- Result result = cgen_->allocator()->Allocate(r0);
+ Result result = cgen()->allocator()->Allocate(r0);
ASSERT(result.is_valid());
return result;
}
@@ -401,7 +393,7 @@ Result VirtualFrame::CallCodeObject(Handle<Code> code,
void VirtualFrame::Drop(int count) {
ASSERT(height() >= count);
- int num_virtual_elements = (elements_.length() - 1) - stack_pointer_;
+ int num_virtual_elements = (element_count() - 1) - stack_pointer_;
// Emit code to lower the stack pointer if necessary.
if (num_virtual_elements < count) {
@@ -422,13 +414,12 @@ void VirtualFrame::Drop(int count) {
Result VirtualFrame::Pop() {
UNIMPLEMENTED();
- Result invalid(cgen_);
- return invalid;
+ return Result();
}
void VirtualFrame::EmitPop(Register reg) {
- ASSERT(stack_pointer_ == elements_.length() - 1);
+ ASSERT(stack_pointer_ == element_count() - 1);
stack_pointer_--;
elements_.RemoveLast();
__ pop(reg);
@@ -436,7 +427,7 @@ void VirtualFrame::EmitPop(Register reg) {
void VirtualFrame::EmitPush(Register reg) {
- ASSERT(stack_pointer_ == elements_.length() - 1);
+ ASSERT(stack_pointer_ == element_count() - 1);
elements_.Add(FrameElement::MemoryElement());
stack_pointer_++;
__ push(reg);
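
EmitPush and EmitPop both assert that the hardware sp sits exactly at the top virtual-frame element, so the element list and sp move in lock step. A toy model of that invariant (illustrative only; the real methods also emit the push/pop instruction):

    #include <cassert>

    struct FrameModelSketch {
      int element_count;   // element_count()
      int stack_pointer;   // index of the element the hardware sp points at

      void EmitPush() {    // real code also emits __ push(reg)
        assert(stack_pointer == element_count - 1);
        element_count++;
        stack_pointer++;
      }
      void EmitPop() {     // real code also emits __ pop(reg)
        assert(stack_pointer == element_count - 1);
        element_count--;
        stack_pointer--;
      }
    };
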
diff --git a/deps/v8/src/arm/virtual-frame-arm.h b/deps/v8/src/arm/virtual-frame-arm.h
index 371a23e93..ebebd534a 100644
--- a/deps/v8/src/arm/virtual-frame-arm.h
+++ b/deps/v8/src/arm/virtual-frame-arm.h
@@ -29,8 +29,10 @@
#define V8_ARM_VIRTUAL_FRAME_ARM_H_
#include "register-allocator.h"
+#include "scopes.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// -------------------------------------------------------------------------
// Virtual frames
@@ -41,7 +43,7 @@ namespace v8 { namespace internal {
// as random access to the expression stack elements, locals, and
// parameters.
-class VirtualFrame : public Malloced {
+class VirtualFrame : public ZoneObject {
public:
// A utility class to introduce a scope where the virtual frame is
// expected to remain spilled. The constructor spills the code
@@ -50,42 +52,66 @@ class VirtualFrame : public Malloced {
// generator is being transformed.
class SpilledScope BASE_EMBEDDED {
public:
- explicit SpilledScope(CodeGenerator* cgen);
+ SpilledScope() : previous_state_(cgen()->in_spilled_code()) {
+ ASSERT(cgen()->has_valid_frame());
+ cgen()->frame()->SpillAll();
+ cgen()->set_in_spilled_code(true);
+ }
- ~SpilledScope();
+ ~SpilledScope() {
+ cgen()->set_in_spilled_code(previous_state_);
+ }
private:
- CodeGenerator* cgen_;
bool previous_state_;
+
+ CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
};
// An illegal index into the virtual frame.
static const int kIllegalIndex = -1;
// Construct an initial virtual frame on entry to a JS function.
- explicit VirtualFrame(CodeGenerator* cgen);
+ VirtualFrame();
// Construct a virtual frame as a clone of an existing one.
explicit VirtualFrame(VirtualFrame* original);
+ CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
+ MacroAssembler* masm() { return cgen()->masm(); }
+
// Create a duplicate of an existing valid frame element.
FrameElement CopyElementAt(int index);
+ // The number of elements on the virtual frame.
+ int element_count() { return elements_.length(); }
+
// The height of the virtual expression stack.
- int height() const {
- return elements_.length() - expression_base_index();
+ int height() {
+ return element_count() - expression_base_index();
+ }
+
+ int register_location(int num) {
+ ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
+ return register_locations_[num];
+ }
+
+ int register_location(Register reg) {
+ return register_locations_[RegisterAllocator::ToNumber(reg)];
}
- int register_index(Register reg) {
- return register_locations_[reg.code()];
+ void set_register_location(Register reg, int index) {
+ register_locations_[RegisterAllocator::ToNumber(reg)] = index;
}
- bool is_used(int reg_code) {
- return register_locations_[reg_code] != kIllegalIndex;
+ bool is_used(int num) {
+ ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
+ return register_locations_[num] != kIllegalIndex;
}
bool is_used(Register reg) {
- return is_used(reg.code());
+ return register_locations_[RegisterAllocator::ToNumber(reg)]
+ != kIllegalIndex;
}
// Add extra in-memory elements to the top of the frame to match an actual
@@ -95,7 +121,12 @@ class VirtualFrame : public Malloced {
// Forget elements from the top of the frame to match an actual frame (eg,
// the frame after a runtime call). No code is emitted.
- void Forget(int count);
+ void Forget(int count) {
+ ASSERT(count >= 0);
+ ASSERT(stack_pointer_ == element_count() - 1);
+ stack_pointer_ -= count;
+ ForgetElements(count);
+ }
// Forget count elements from the top of the frame without adjusting
// the stack pointer downward. This is used, for example, before
@@ -106,7 +137,9 @@ class VirtualFrame : public Malloced {
void SpillAll();
// Spill all occurrences of a specific register from the frame.
- void Spill(Register reg);
+ void Spill(Register reg) {
+ if (is_used(reg)) SpillElementAt(register_location(reg));
+ }
// Spill all occurrences of an arbitrary register if possible. Return the
// register spilled or no_reg if it was not possible to free any register
@@ -127,13 +160,23 @@ class VirtualFrame : public Malloced {
// tells the register allocator that it is free to use frame-internal
// registers. Used when the code generator's frame is switched from this
// one to NULL by an unconditional jump.
- void DetachFromCodeGenerator();
+ void DetachFromCodeGenerator() {
+ RegisterAllocator* cgen_allocator = cgen()->allocator();
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ if (is_used(i)) cgen_allocator->Unuse(i);
+ }
+ }
// (Re)attach a frame to its code generator. This informs the register
// allocator that the frame-internal register references are active again.
// Used when a code generator's frame is switched from NULL to this one by
// binding a label.
- void AttachToCodeGenerator();
+ void AttachToCodeGenerator() {
+ RegisterAllocator* cgen_allocator = cgen()->allocator();
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ if (is_used(i)) cgen_allocator->Use(i);
+ }
+ }
// Emit code for the physical JS entry and exit frame sequences. After
// calling Enter, the virtual frame is ready for use; and after calling
@@ -149,13 +192,13 @@ class VirtualFrame : public Malloced {
void PrepareForReturn();
// Allocate and initialize the frame-allocated locals.
- void AllocateStackSlots(int count);
+ void AllocateStackSlots();
// The current top of the expression stack as an assembly operand.
- MemOperand Top() const { return MemOperand(sp, 0); }
+ MemOperand Top() { return MemOperand(sp, 0); }
// An element of the expression stack as an assembly operand.
- MemOperand ElementAt(int index) const {
+ MemOperand ElementAt(int index) {
return MemOperand(sp, index * kPointerSize);
}
@@ -165,18 +208,18 @@ class VirtualFrame : public Malloced {
// Set a frame element to a constant. The index is frame-top relative.
void SetElementAt(int index, Handle<Object> value) {
- Result temp(value, cgen_);
+ Result temp(value);
SetElementAt(index, &temp);
}
void PushElementAt(int index) {
- PushFrameSlotAt(elements_.length() - index - 1);
+ PushFrameSlotAt(element_count() - index - 1);
}
// A frame-allocated local as an assembly operand.
- MemOperand LocalAt(int index) const {
+ MemOperand LocalAt(int index) {
ASSERT(0 <= index);
- ASSERT(index < local_count_);
+ ASSERT(index < local_count());
return MemOperand(fp, kLocal0Offset - index * kPointerSize);
}
@@ -202,13 +245,13 @@ class VirtualFrame : public Malloced {
void PushReceiverSlotAddress();
// The function frame slot.
- MemOperand Function() const { return MemOperand(fp, kFunctionOffset); }
+ MemOperand Function() { return MemOperand(fp, kFunctionOffset); }
// Push the function on top of the frame.
void PushFunction() { PushFrameSlotAt(function_index()); }
// The context frame slot.
- MemOperand Context() const { return MemOperand(fp, kContextOffset); }
+ MemOperand Context() { return MemOperand(fp, kContextOffset); }
// Save the value of the esi register to the context frame slot.
void SaveContextRegister();
@@ -218,10 +261,11 @@ class VirtualFrame : public Malloced {
void RestoreContextRegister();
// A parameter as an assembly operand.
- MemOperand ParameterAt(int index) const {
+ MemOperand ParameterAt(int index) {
// Index -1 corresponds to the receiver.
- ASSERT(-1 <= index && index <= parameter_count_);
- return MemOperand(fp, (1 + parameter_count_ - index) * kPointerSize);
+ ASSERT(-1 <= index); // -1 is the receiver.
+ ASSERT(index <= parameter_count());
+ return MemOperand(fp, (1 + parameter_count() - index) * kPointerSize);
}
// Push a copy of the value of a parameter frame slot on top of the frame.
@@ -243,14 +287,17 @@ class VirtualFrame : public Malloced {
}
// The receiver frame slot.
- MemOperand Receiver() const { return ParameterAt(-1); }
+ MemOperand Receiver() { return ParameterAt(-1); }
// Push a try-catch or try-finally handler on top of the virtual frame.
void PushTryHandler(HandlerType type);
// Call stub given the number of arguments it expects on (and
// removes from) the stack.
- Result CallStub(CodeStub* stub, int arg_count);
+ Result CallStub(CodeStub* stub, int arg_count) {
+ PrepareForCall(arg_count, arg_count);
+ return RawCallStub(stub);
+ }
// Call stub that expects its argument in r0. The argument is given
// as a result which must be the register r0.
@@ -297,7 +344,7 @@ class VirtualFrame : public Malloced {
void Drop() { Drop(1); }
// Duplicate the top element of the frame.
- void Dup() { PushFrameSlotAt(elements_.length() - 1); }
+ void Dup() { PushFrameSlotAt(element_count() - 1); }
// Pop an element from the top of the expression stack. Returns a
// Result, which may be a constant or a register.
@@ -317,7 +364,15 @@ class VirtualFrame : public Malloced {
void Push(Smi* value) { Push(Handle<Object>(value)); }
// Pushing a result invalidates it (its contents become owned by the frame).
- void Push(Result* result);
+ void Push(Result* result) {
+ if (result->is_register()) {
+ Push(result->reg(), result->static_type());
+ } else {
+ ASSERT(result->is_constant());
+ Push(result->handle());
+ }
+ result->Unuse();
+ }
// Nip removes zero or more elements from immediately below the top
// of the frame, leaving the previous top-of-frame value on top of
@@ -332,70 +387,69 @@ class VirtualFrame : public Malloced {
static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
static const int kPreallocatedElements = 5 + 8; // 8 expression stack slots.
- CodeGenerator* cgen_;
- MacroAssembler* masm_;
-
- List<FrameElement> elements_;
-
- // The number of frame-allocated locals and parameters respectively.
- int parameter_count_;
- int local_count_;
+ ZoneList<FrameElement> elements_;
// The index of the element that is at the processor's stack pointer
// (the sp register).
int stack_pointer_;
- // The index of the element that is at the processor's frame pointer
- // (the fp register).
- int frame_pointer_;
-
// The index of the register frame element using each register, or
// kIllegalIndex if a register is not on the frame.
- int register_locations_[kNumRegisters];
+ int register_locations_[RegisterAllocator::kNumRegisters];
+
+ // The number of frame-allocated locals and parameters respectively.
+ int parameter_count() { return cgen()->scope()->num_parameters(); }
+ int local_count() { return cgen()->scope()->num_stack_slots(); }
+
+ // The index of the element that is at the processor's frame pointer
+ // (the fp register). The parameters, receiver, function, and context
+ // are below the frame pointer.
+ int frame_pointer() { return parameter_count() + 3; }
// The index of the first parameter. The receiver lies below the first
// parameter.
- int param0_index() const { return 1; }
+ int param0_index() { return 1; }
- // The index of the context slot in the frame.
- int context_index() const {
- ASSERT(frame_pointer_ != kIllegalIndex);
- return frame_pointer_ - 1;
- }
+ // The index of the context slot in the frame. It is immediately
+ // below the frame pointer.
+ int context_index() { return frame_pointer() - 1; }
- // The index of the function slot in the frame. It lies above the context
- // slot.
- int function_index() const {
- ASSERT(frame_pointer_ != kIllegalIndex);
- return frame_pointer_ - 2;
- }
+ // The index of the function slot in the frame. It is below the frame
+ // pointer and context slot.
+ int function_index() { return frame_pointer() - 2; }
- // The index of the first local. Between the parameters and the locals
- // lie the return address, the saved frame pointer, the context, and the
- // function.
- int local0_index() const {
- ASSERT(frame_pointer_ != kIllegalIndex);
- return frame_pointer_ + 2;
- }
+ // The index of the first local. Between the frame pointer and the
+ // locals lies the return address.
+ int local0_index() { return frame_pointer() + 2; }
// The index of the base of the expression stack.
- int expression_base_index() const { return local0_index() + local_count_; }
+ int expression_base_index() { return local0_index() + local_count(); }
// Convert a frame index into a frame pointer relative offset into the
// actual stack.
- int fp_relative(int index) const {
- return (frame_pointer_ - index) * kPointerSize;
+ int fp_relative(int index) {
+ ASSERT(index < element_count());
+ ASSERT(frame_pointer() < element_count()); // FP is on the frame.
+ return (frame_pointer() - index) * kPointerSize;
}
// Record an occurrence of a register in the virtual frame. This has the
// effect of incrementing the register's external reference count and
// of updating the index of the register's location in the frame.
- void Use(Register reg, int index);
+ void Use(Register reg, int index) {
+ ASSERT(!is_used(reg));
+ set_register_location(reg, index);
+ cgen()->allocator()->Use(reg);
+ }
// Record that a register reference has been dropped from the frame. This
// decrements the register's external reference count and invalidates the
// index of the register's location in the frame.
- void Unuse(Register reg);
+ void Unuse(Register reg) {
+ ASSERT(is_used(reg));
+ set_register_location(reg, kIllegalIndex);
+ cgen()->allocator()->Unuse(reg);
+ }
// Spill the element at a particular index---write it to memory if
// necessary, free any associated register, and forget its value if
@@ -407,7 +461,7 @@ class VirtualFrame : public Malloced {
// Keep the element type as register or constant, and clear the dirty bit.
void SyncElementAt(int index);
- // Sync the range of elements in [begin, end).
+ // Sync the range of elements in [begin, end] with memory.
void SyncRange(int begin, int end);
// Sync a single unsynced element that lies beneath or at the stack pointer.
@@ -471,6 +525,8 @@ class VirtualFrame : public Malloced {
bool Equals(VirtualFrame* other);
+ // Classes that need raw access to the elements_ array.
+ friend class DeferredCode;
friend class JumpTarget;
};
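
The rewritten header derives every frame index from the scope instead of the cached parameter_count_ and local_count_ fields, with the frame pointer fixed at parameter_count() + 3. A minimal standalone sketch of that index arithmetic (illustrative only, not V8 code; kPointerSize assumes the 32-bit ARM word size of the time):

    #include <cassert>
    #include <cstdio>

    // Layout implied by the header above: the receiver, parameters,
    // function and context lie below the frame pointer.
    struct FrameLayout {
      int parameter_count;  // cgen()->scope()->num_parameters() in the patch
      int local_count;      // cgen()->scope()->num_stack_slots() in the patch
      static const int kPointerSize = 4;

      int frame_pointer() const { return parameter_count + 3; }
      int context_index() const { return frame_pointer() - 1; }
      int function_index() const { return frame_pointer() - 2; }
      int local0_index() const { return frame_pointer() + 2; }
      int expression_base_index() const { return local0_index() + local_count; }

      // Frame-pointer-relative byte offset of a frame index.
      int fp_relative(int index) const {
        return (frame_pointer() - index) * kPointerSize;
      }
    };

    int main() {
      FrameLayout f = {2, 3};  // two parameters, three locals
      assert(f.fp_relative(f.context_index()) == FrameLayout::kPointerSize);
      std::printf("fp=%d local0=%d expr_base=%d\n",
                  f.frame_pointer(), f.local0_index(),
                  f.expression_base_index());
      return 0;
    }
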
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index ec0e4fd14..5dba75d2d 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -43,7 +43,8 @@
#include "stub-cache.h"
#include "regexp-stack.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// -----------------------------------------------------------------------------
@@ -90,13 +91,13 @@ int Label::pos() const {
// bits, the lowest 7 bits written first.
//
// data-jump + pos: 00 1110 11,
-// signed int, lowest byte written first
+// signed intptr_t, lowest byte written first
//
// data-jump + st.pos: 01 1110 11,
-// signed int, lowest byte written first
+// signed intptr_t, lowest byte written first
//
// data-jump + comm.: 10 1110 11,
-// signed int, lowest byte written first
+// signed intptr_t, lowest byte written first
//
const int kMaxRelocModes = 14;
@@ -158,7 +159,7 @@ void RelocInfoWriter::WriteTaggedPC(uint32_t pc_delta, int tag) {
}
-void RelocInfoWriter::WriteTaggedData(int32_t data_delta, int tag) {
+void RelocInfoWriter::WriteTaggedData(intptr_t data_delta, int tag) {
*--pos_ = data_delta << kPositionTypeTagBits | tag;
}
@@ -178,11 +179,12 @@ void RelocInfoWriter::WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag) {
}
-void RelocInfoWriter::WriteExtraTaggedData(int32_t data_delta, int top_tag) {
+void RelocInfoWriter::WriteExtraTaggedData(intptr_t data_delta, int top_tag) {
WriteExtraTag(kDataJumpTag, top_tag);
- for (int i = 0; i < kIntSize; i++) {
+ for (int i = 0; i < kIntptrSize; i++) {
*--pos_ = data_delta;
- data_delta = ArithmeticShiftRight(data_delta, kBitsPerByte);
+ // Signed right shift is arithmetic shift. Tested in test-utils.cc.
+ data_delta = data_delta >> kBitsPerByte;
}
}
@@ -205,11 +207,13 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
WriteTaggedPC(pc_delta, kCodeTargetTag);
} else if (RelocInfo::IsPosition(rmode)) {
// Use signed delta-encoding for data.
- int32_t data_delta = rinfo->data() - last_data_;
+ intptr_t data_delta = rinfo->data() - last_data_;
int pos_type_tag = rmode == RelocInfo::POSITION ? kNonstatementPositionTag
: kStatementPositionTag;
// Check if data is small enough to fit in a tagged byte.
- if (is_intn(data_delta, kSmallDataBits)) {
+ // We cannot use is_intn because data_delta is not an int32_t.
+ if (data_delta >= -(1 << (kSmallDataBits-1)) &&
+ data_delta < 1 << (kSmallDataBits-1)) {
WriteTaggedPC(pc_delta, kPositionTag);
WriteTaggedData(data_delta, pos_type_tag);
last_data_ = rinfo->data();
@@ -263,9 +267,9 @@ inline void RelocIterator::AdvanceReadPC() {
void RelocIterator::AdvanceReadData() {
- int32_t x = 0;
- for (int i = 0; i < kIntSize; i++) {
- x |= *--pos_ << i * kBitsPerByte;
+ intptr_t x = 0;
+ for (int i = 0; i < kIntptrSize; i++) {
+ x |= static_cast<intptr_t>(*--pos_) << i * kBitsPerByte;
}
rinfo_.data_ += x;
}
@@ -294,7 +298,8 @@ inline int RelocIterator::GetPositionTypeTag() {
inline void RelocIterator::ReadTaggedData() {
int8_t signed_b = *pos_;
- rinfo_.data_ += ArithmeticShiftRight(signed_b, kPositionTypeTagBits);
+ // Signed right shift is arithmetic shift. Tested in test-utils.cc.
+ rinfo_.data_ += signed_b >> kPositionTypeTagBits;
}
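
The assembler.cc hunks widen reloc data deltas from int32_t to intptr_t and serialize them lowest byte first, relying on signed right shift behaving as an arithmetic shift. A hedged round-trip sketch of the same byte encoding (standalone, not the RelocInfoWriter/RelocIterator API; V8 writes through a decrementing pointer, a forward buffer is used here):

    #include <cassert>
    #include <stdint.h>

    const int kBitsPerByte = 8;
    const int kIntptrSize = sizeof(intptr_t);

    // Write the delta lowest byte first. Right-shifting a negative signed
    // value is implementation-defined in C++, but it is an arithmetic
    // shift on the toolchains V8 targets -- the same assumption the patch
    // comments call out ("Tested in test-utils.cc").
    static void Encode(intptr_t delta, uint8_t* out) {
      for (int i = 0; i < kIntptrSize; i++) {
        out[i] = static_cast<uint8_t>(delta);
        delta = delta >> kBitsPerByte;
      }
    }

    // OR the bytes back together. All kIntptrSize bytes are stored, so no
    // explicit sign extension is needed on the way out.
    static intptr_t Decode(const uint8_t* in) {
      uintptr_t x = 0;
      for (int i = 0; i < kIntptrSize; i++) {
        x |= static_cast<uintptr_t>(in[i]) << (i * kBitsPerByte);
      }
      return static_cast<intptr_t>(x);
    }

    int main() {
      uint8_t buf[sizeof(intptr_t)];
      Encode(-12345, buf);
      assert(Decode(buf) == -12345);
      return 0;
    }
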
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 8abdbc767..66f952adb 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -40,7 +40,8 @@
#include "zone-inl.h"
#include "token.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// -----------------------------------------------------------------------------
@@ -271,8 +272,8 @@ class RelocInfoWriter BASE_EMBEDDED {
inline uint32_t WriteVariableLengthPCJump(uint32_t pc_delta);
inline void WriteTaggedPC(uint32_t pc_delta, int tag);
inline void WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag);
- inline void WriteExtraTaggedData(int32_t data_delta, int top_tag);
- inline void WriteTaggedData(int32_t data_delta, int tag);
+ inline void WriteExtraTaggedData(intptr_t data_delta, int top_tag);
+ inline void WriteTaggedData(intptr_t data_delta, int tag);
inline void WriteExtraTag(int extra_tag, int top_tag);
byte* pos_;
@@ -423,8 +424,6 @@ class ExternalReference BASE_EMBEDDED {
// -----------------------------------------------------------------------------
// Utility functions
-// Move these into inline file?
-
static inline bool is_intn(int x, int n) {
return -(1 << (n-1)) <= x && x < (1 << (n-1));
}
@@ -436,9 +435,11 @@ static inline bool is_uintn(int x, int n) {
return (x & -(1 << n)) == 0;
}
+static inline bool is_uint2(int x) { return is_uintn(x, 2); }
static inline bool is_uint3(int x) { return is_uintn(x, 3); }
static inline bool is_uint4(int x) { return is_uintn(x, 4); }
static inline bool is_uint5(int x) { return is_uintn(x, 5); }
+static inline bool is_uint6(int x) { return is_uintn(x, 6); }
static inline bool is_uint8(int x) { return is_uintn(x, 8); }
static inline bool is_uint12(int x) { return is_uintn(x, 12); }
static inline bool is_uint16(int x) { return is_uintn(x, 16); }
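
The new is_uint2 and is_uint6 helpers follow the existing predicates. A tiny self-check of the formulas exactly as they appear in assembler.h:

    #include <cassert>

    // Does x fit in an n-bit signed / unsigned field?
    static inline bool is_intn(int x, int n) {
      return -(1 << (n - 1)) <= x && x < (1 << (n - 1));
    }
    static inline bool is_uintn(int x, int n) {
      return (x & -(1 << n)) == 0;
    }

    int main() {
      assert(is_uintn(3, 2) && !is_uintn(4, 2));   // uint2 covers 0..3
      assert(is_intn(-32, 6) && !is_intn(32, 6));  // int6 covers -32..31
      return 0;
    }
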
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index d19e3b3e0..eef8da715 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -31,7 +31,8 @@
#include "scopes.h"
#include "string-stream.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
VariableProxySentinel VariableProxySentinel::this_proxy_(true);
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index 6a2f67105..80a4aa5f2 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -37,7 +37,8 @@
#include "jsregexp.h"
#include "jump-target.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// The abstract syntax tree is an intermediate, light-weight
// representation of the parsed JavaScript code suitable for
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 09cf68dea..89c92b069 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -37,7 +37,8 @@
#include "macro-assembler.h"
#include "natives.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// A SourceCodeCache uses a FixedArray to store pairs of
// (AsciiString*, JSFunction*), mapping names of native code files
@@ -46,7 +47,7 @@ namespace v8 { namespace internal {
// generate an index for each native JS file.
class SourceCodeCache BASE_EMBEDDED {
public:
- explicit SourceCodeCache(ScriptType type): type_(type) { }
+ explicit SourceCodeCache(Script::Type type): type_(type) { }
void Initialize(bool create_heap_objects) {
if (create_heap_objects) {
@@ -88,13 +89,13 @@ class SourceCodeCache BASE_EMBEDDED {
}
private:
- ScriptType type_;
+ Script::Type type_;
FixedArray* cache_;
DISALLOW_COPY_AND_ASSIGN(SourceCodeCache);
};
-static SourceCodeCache natives_cache(SCRIPT_TYPE_NATIVE);
-static SourceCodeCache extensions_cache(SCRIPT_TYPE_EXTENSION);
+static SourceCodeCache natives_cache(Script::TYPE_NATIVE);
+static SourceCodeCache extensions_cache(Script::TYPE_EXTENSION);
Handle<String> Bootstrapper::NativesSourceLookup(int index) {
@@ -521,7 +522,7 @@ void Genesis::CreateRoots(v8::Handle<v8::ObjectTemplate> global_template,
empty_function->set_code(*code);
Handle<String> source = Factory::NewStringFromAscii(CStrVector("() {}"));
Handle<Script> script = Factory::NewScript(source);
- script->set_type(Smi::FromInt(SCRIPT_TYPE_NATIVE));
+ script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
empty_function->shared()->set_script(*script);
empty_function->shared()->set_start_position(0);
empty_function->shared()->set_end_position(source->length());
@@ -820,14 +821,28 @@ void Genesis::CreateRoots(v8::Handle<v8::ObjectTemplate> global_template,
global_context()->set_context_extension_function(*context_extension_fun);
}
- // Setup the call-as-function delegate.
- Handle<Code> code =
- Handle<Code>(Builtins::builtin(Builtins::HandleApiCallAsFunction));
- Handle<JSFunction> delegate =
- Factory::NewFunction(Factory::empty_symbol(), JS_OBJECT_TYPE,
- JSObject::kHeaderSize, code, true);
- global_context()->set_call_as_function_delegate(*delegate);
- delegate->shared()->DontAdaptArguments();
+
+ {
+ // Setup the call-as-function delegate.
+ Handle<Code> code =
+ Handle<Code>(Builtins::builtin(Builtins::HandleApiCallAsFunction));
+ Handle<JSFunction> delegate =
+ Factory::NewFunction(Factory::empty_symbol(), JS_OBJECT_TYPE,
+ JSObject::kHeaderSize, code, true);
+ global_context()->set_call_as_function_delegate(*delegate);
+ delegate->shared()->DontAdaptArguments();
+ }
+
+ {
+ // Setup the call-as-constructor delegate.
+ Handle<Code> code =
+ Handle<Code>(Builtins::builtin(Builtins::HandleApiCallAsConstructor));
+ Handle<JSFunction> delegate =
+ Factory::NewFunction(Factory::empty_symbol(), JS_OBJECT_TYPE,
+ JSObject::kHeaderSize, code, true);
+ global_context()->set_call_as_constructor_delegate(*delegate);
+ delegate->shared()->DontAdaptArguments();
+ }
global_context()->set_special_function_table(Heap::empty_fixed_array());
@@ -1047,6 +1062,14 @@ bool Genesis::InstallNatives() {
Factory::LookupAsciiSymbol("type"),
proxy_type,
common_attributes);
+ Handle<Proxy> proxy_compilation_type =
+ Factory::NewProxy(&Accessors::ScriptCompilationType);
+ script_descriptors =
+ Factory::CopyAppendProxyDescriptor(
+ script_descriptors,
+ Factory::LookupAsciiSymbol("compilation_type"),
+ proxy_compilation_type,
+ common_attributes);
Handle<Proxy> proxy_line_ends =
Factory::NewProxy(&Accessors::ScriptLineEnds);
script_descriptors =
@@ -1063,16 +1086,38 @@ bool Genesis::InstallNatives() {
Factory::LookupAsciiSymbol("context_data"),
proxy_context_data,
common_attributes);
+ Handle<Proxy> proxy_eval_from_function =
+ Factory::NewProxy(&Accessors::ScriptEvalFromFunction);
+ script_descriptors =
+ Factory::CopyAppendProxyDescriptor(
+ script_descriptors,
+ Factory::LookupAsciiSymbol("eval_from_function"),
+ proxy_eval_from_function,
+ common_attributes);
+ Handle<Proxy> proxy_eval_from_position =
+ Factory::NewProxy(&Accessors::ScriptEvalFromPosition);
+ script_descriptors =
+ Factory::CopyAppendProxyDescriptor(
+ script_descriptors,
+ Factory::LookupAsciiSymbol("eval_from_position"),
+ proxy_eval_from_position,
+ common_attributes);
Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
script_map->set_instance_descriptors(*script_descriptors);
// Allocate the empty script.
Handle<Script> script = Factory::NewScript(Factory::empty_string());
- script->set_type(Smi::FromInt(SCRIPT_TYPE_NATIVE));
+ script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
global_context()->set_empty_script(*script);
}
+#ifdef V8_HOST_ARCH_64_BIT
+ // TODO(X64): Reenable remaining initialization when code generation works.
+ return true;
+#endif // V8_HOST_ARCH_64_BIT
+
+
if (FLAG_natives_file == NULL) {
// Without natives file, install default natives.
for (int i = Natives::GetDelayCount();
@@ -1509,8 +1554,8 @@ Genesis::Genesis(Handle<Object> global_object,
current_ = this;
result_ = NULL;
- // If V8 hasn't been and cannot be initialized, just return.
- if (!V8::HasBeenSetup() && !V8::Initialize(NULL)) return;
+ // If V8 isn't running and cannot be initialized, just return.
+ if (!V8::IsRunning() && !V8::Initialize(NULL)) return;
// Before creating the roots we must save the context and restore it
// on all function exits.
@@ -1518,6 +1563,7 @@ Genesis::Genesis(Handle<Object> global_object,
SaveContext context;
CreateRoots(global_template, global_object);
+
if (!InstallNatives()) return;
MakeFunctionInstancePrototypeWritable();
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index e2883dc0f..0d743e388 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -29,7 +29,8 @@
#ifndef V8_BOOTSTRAPPER_H_
#define V8_BOOTSTRAPPER_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// The Bootstrapper is the public interface for creating a JavaScript global
// context.
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index b27974ffd..1c43f7a4b 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -32,7 +32,8 @@
#include "builtins.h"
#include "ic-inl.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// ----------------------------------------------------------------------------
// Support macros for defining builtins in C.
@@ -394,12 +395,18 @@ BUILTIN(HandleApiCall) {
BUILTIN_END
-// Handle calls to non-function objects created through the API that
-// support calls.
-BUILTIN(HandleApiCallAsFunction) {
- // Non-functions are never called as constructors.
+// Helper function to handle calls to non-function objects created through the
+// API. The object can be called as either a constructor (using new) or just as
+// a function (without new).
+static Object* HandleApiCallAsFunctionOrConstructor(bool is_construct_call,
+ int __argc__,
+ Object** __argv__) {
+ // Non-functions are never called as constructors. Even if this is an object
+ // called as a constructor, the delegate call is not a construct call.
ASSERT(!CalledAsConstructor());
+ Handle<Object> receiver(&__argv__[0]);
+
// Get the object called.
JSObject* obj = JSObject::cast(*receiver);
@@ -431,7 +438,7 @@ BUILTIN(HandleApiCallAsFunction) {
data,
self,
callee,
- false,
+ is_construct_call,
reinterpret_cast<void**>(__argv__ - 1),
__argc__ - 1);
v8::Handle<v8::Value> value;
@@ -450,6 +457,21 @@ BUILTIN(HandleApiCallAsFunction) {
RETURN_IF_SCHEDULED_EXCEPTION();
return result;
}
+
+
+// Handle calls to non-function objects created through the API. This delegate
+// function is used when the call is a normal function call.
+BUILTIN(HandleApiCallAsFunction) {
+ return HandleApiCallAsFunctionOrConstructor(false, __argc__, __argv__);
+}
+BUILTIN_END
+
+
+// Handle calls to non-function objects created through the API. This delegate
+// function is used when the call is a construct call.
+BUILTIN(HandleApiCallAsConstructor) {
+ return HandleApiCallAsFunctionOrConstructor(true, __argc__, __argv__);
+}
BUILTIN_END
@@ -644,12 +666,12 @@ void Builtins::Setup(bool create_heap_objects) {
Code::ComputeFlags(Code::BUILTIN) \
},
-#define DEF_FUNCTION_PTR_A(name, kind, state) \
- { FUNCTION_ADDR(Generate_##name), \
- NULL, \
- #name, \
- name, \
- Code::ComputeFlags(Code::kind, state) \
+#define DEF_FUNCTION_PTR_A(name, kind, state) \
+ { FUNCTION_ADDR(Generate_##name), \
+ NULL, \
+ #name, \
+ name, \
+ Code::ComputeFlags(Code::kind, NOT_IN_LOOP, state) \
},
// Define array of pointers to generators and C builtin functions.
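The builtins.cc hunk folds the function-call and construct-call delegates into a single helper parameterized by is_construct_call. A minimal sketch of the pattern (all names are illustrative stand-ins, not the BUILTIN macro machinery):

    #include <cstdio>

    // Shared worker parameterized by the call kind, the way the patch
    // parameterizes HandleApiCallAsFunctionOrConstructor.
    static int HandleCallAsFunctionOrConstructor(bool is_construct_call,
                                                 int argc) {
      std::printf("%s call, %d args\n",
                  is_construct_call ? "construct" : "function", argc);
      return 0;
    }

    // Two thin entry points, mirroring the pair of BUILTIN(...) wrappers.
    static int HandleCallAsFunction(int argc) {
      return HandleCallAsFunctionOrConstructor(false, argc);
    }
    static int HandleCallAsConstructor(int argc) {
      return HandleCallAsFunctionOrConstructor(true, argc);
    }

    int main() {
      HandleCallAsFunction(2);     // obj(a, b)
      HandleCallAsConstructor(2);  // new obj(a, b)
      return 0;
    }
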
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index 4e74a3cc4..6e0f83256 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -28,7 +28,8 @@
#ifndef V8_BUILTINS_H_
#define V8_BUILTINS_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Define list of builtins implemented in C.
#define BUILTIN_LIST_C(V) \
@@ -42,7 +43,8 @@ namespace v8 { namespace internal {
V(ArrayPop) \
\
V(HandleApiCall) \
- V(HandleApiCallAsFunction)
+ V(HandleApiCallAsFunction) \
+ V(HandleApiCallAsConstructor)
// Define list of builtins implemented in assembly.
@@ -99,35 +101,36 @@ namespace v8 { namespace internal {
#endif
// Define list of builtins implemented in JavaScript.
-#define BUILTINS_LIST_JS(V) \
- V(EQUALS, 1) \
- V(STRICT_EQUALS, 1) \
- V(COMPARE, 2) \
- V(ADD, 1) \
- V(SUB, 1) \
- V(MUL, 1) \
- V(DIV, 1) \
- V(MOD, 1) \
- V(BIT_OR, 1) \
- V(BIT_AND, 1) \
- V(BIT_XOR, 1) \
- V(UNARY_MINUS, 0) \
- V(BIT_NOT, 0) \
- V(SHL, 1) \
- V(SAR, 1) \
- V(SHR, 1) \
- V(DELETE, 1) \
- V(IN, 1) \
- V(INSTANCE_OF, 1) \
- V(GET_KEYS, 0) \
- V(FILTER_KEY, 1) \
- V(CALL_NON_FUNCTION, 0) \
- V(TO_OBJECT, 0) \
- V(TO_NUMBER, 0) \
- V(TO_STRING, 0) \
- V(STRING_ADD_LEFT, 1) \
- V(STRING_ADD_RIGHT, 1) \
- V(APPLY_PREPARE, 1) \
+#define BUILTINS_LIST_JS(V) \
+ V(EQUALS, 1) \
+ V(STRICT_EQUALS, 1) \
+ V(COMPARE, 2) \
+ V(ADD, 1) \
+ V(SUB, 1) \
+ V(MUL, 1) \
+ V(DIV, 1) \
+ V(MOD, 1) \
+ V(BIT_OR, 1) \
+ V(BIT_AND, 1) \
+ V(BIT_XOR, 1) \
+ V(UNARY_MINUS, 0) \
+ V(BIT_NOT, 0) \
+ V(SHL, 1) \
+ V(SAR, 1) \
+ V(SHR, 1) \
+ V(DELETE, 1) \
+ V(IN, 1) \
+ V(INSTANCE_OF, 1) \
+ V(GET_KEYS, 0) \
+ V(FILTER_KEY, 1) \
+ V(CALL_NON_FUNCTION, 0) \
+ V(CALL_NON_FUNCTION_AS_CONSTRUCTOR, 0) \
+ V(TO_OBJECT, 0) \
+ V(TO_NUMBER, 0) \
+ V(TO_STRING, 0) \
+ V(STRING_ADD_LEFT, 1) \
+ V(STRING_ADD_RIGHT, 1) \
+ V(APPLY_PREPARE, 1) \
V(APPLY_OVERFLOW, 1)
diff --git a/deps/v8/src/bytecodes-irregexp.h b/deps/v8/src/bytecodes-irregexp.h
index 94f37a8db..bcb34c899 100644
--- a/deps/v8/src/bytecodes-irregexp.h
+++ b/deps/v8/src/bytecodes-irregexp.h
@@ -29,7 +29,8 @@
#ifndef V8_BYTECODES_IRREGEXP_H_
#define V8_BYTECODES_IRREGEXP_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
static const int BYTECODE_MASK = 0xff;
diff --git a/deps/v8/src/char-predicates-inl.h b/deps/v8/src/char-predicates-inl.h
index 217db9c33..fadbc9afb 100644
--- a/deps/v8/src/char-predicates-inl.h
+++ b/deps/v8/src/char-predicates-inl.h
@@ -30,7 +30,8 @@
#include "char-predicates.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
inline bool IsCarriageReturn(uc32 c) {
diff --git a/deps/v8/src/char-predicates.h b/deps/v8/src/char-predicates.h
index 63e83b4d3..dac1eb8fe 100644
--- a/deps/v8/src/char-predicates.h
+++ b/deps/v8/src/char-predicates.h
@@ -28,7 +28,8 @@
#ifndef V8_CHAR_PREDICATES_H_
#define V8_CHAR_PREDICATES_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Unicode character predicates as defined by ECMA-262, 3rd,
// used for lexical analysis.
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 06c4dcdc2..b14ede18a 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -32,7 +32,8 @@
#include "factory.h"
#include "macro-assembler.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
Handle<Code> CodeStub::GetCode() {
uint32_t key = GetKey();
@@ -58,7 +59,7 @@ Handle<Code> CodeStub::GetCode() {
masm.GetCode(&desc);
// Copy the generated code into a heap object, and store the major key.
- Code::Flags flags = Code::ComputeFlags(Code::STUB);
+ Code::Flags flags = Code::ComputeFlags(Code::STUB, InLoop());
Handle<Code> code = Factory::NewCode(desc, NULL, flags, masm.CodeObject());
code->set_major_key(MajorKey());
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 67634aa13..183a64abe 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -28,7 +28,8 @@
#ifndef V8_CODE_STUBS_H_
#define V8_CODE_STUBS_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Stub is base classes of all stubs.
@@ -82,6 +83,10 @@ class CodeStub BASE_EMBEDDED {
virtual Major MajorKey() = 0;
virtual int MinorKey() = 0;
+ // The CallFunctionStub needs to override this so it can encode whether a
+ // lazily generated function should be fully optimized or not.
+ virtual InLoopFlag InLoop() { return NOT_IN_LOOP; }
+
// Returns a name for logging/debugging purposes.
virtual const char* GetName() { return MajorName(MajorKey()); }
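The new InLoop() hook lets a stub fold loop information into its Code::Flags alongside the kind and IC state. A sketch of that kind of flag packing (the bit layout here is illustrative; the real Code::ComputeFlags packing lives in objects.h and is not reproduced):

    #include <cassert>

    enum InLoopFlag { NOT_IN_LOOP = 0, IN_LOOP = 1 };
    enum Kind { STUB = 0, BUILTIN = 1, FUNCTION = 2 };
    typedef unsigned Flags;

    static Flags ComputeFlags(Kind kind, InLoopFlag in_loop) {
      return (static_cast<Flags>(kind) << 1) | static_cast<Flags>(in_loop);
    }
    static Kind ExtractKind(Flags f) { return static_cast<Kind>(f >> 1); }
    static InLoopFlag ExtractInLoop(Flags f) {
      return static_cast<InLoopFlag>(f & 1);
    }

    int main() {
      Flags f = ComputeFlags(STUB, IN_LOOP);
      assert(ExtractKind(f) == STUB);
      assert(ExtractInLoop(f) == IN_LOOP);
      return 0;
    }
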
diff --git a/deps/v8/src/code.h b/deps/v8/src/code.h
index 87e079490..072344b67 100644
--- a/deps/v8/src/code.h
+++ b/deps/v8/src/code.h
@@ -28,7 +28,8 @@
#ifndef V8_CODE_H_
#define V8_CODE_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Wrapper class for passing expected and actual parameter counts as
diff --git a/deps/v8/src/codegen-inl.h b/deps/v8/src/codegen-inl.h
index c42f5ac64..bee237d8c 100644
--- a/deps/v8/src/codegen-inl.h
+++ b/deps/v8/src/codegen-inl.h
@@ -30,9 +30,23 @@
#define V8_CODEGEN_INL_H_
#include "codegen.h"
+#include "register-allocator-inl.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "ia32/codegen-ia32-inl.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/codegen-x64-inl.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/codegen-arm-inl.h"
+#else
+#error Unsupported target architecture.
+#endif
+
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
+#define __ ACCESS_MASM(masm_)
// -----------------------------------------------------------------------------
// Support for "structured" code comments.
@@ -44,15 +58,12 @@ namespace v8 { namespace internal {
class Comment BASE_EMBEDDED {
public:
- Comment(MacroAssembler* masm, const char* msg)
- : masm_(masm),
- msg_(msg) {
- masm_->RecordComment(msg);
+ Comment(MacroAssembler* masm, const char* msg) : masm_(masm), msg_(msg) {
+ __ RecordComment(msg);
}
~Comment() {
- if (msg_[0] == '[')
- masm_->RecordComment("]");
+ if (msg_[0] == '[') __ RecordComment("]");
}
private:
@@ -69,6 +80,8 @@ class Comment BASE_EMBEDDED {
#endif // DEBUG
+#undef __
+
} } // namespace v8::internal
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index 40c2583f4..f46269fe9 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -38,27 +38,41 @@
#include "scopeinfo.h"
#include "stub-cache.h"
-namespace v8 { namespace internal {
-
-DeferredCode::DeferredCode(CodeGenerator* generator)
- : generator_(generator),
- masm_(generator->masm()),
- enter_(generator),
- exit_(generator, JumpTarget::BIDIRECTIONAL),
- statement_position_(masm_->current_statement_position()),
- position_(masm_->current_position()) {
- generator->AddDeferred(this);
+namespace v8 {
+namespace internal {
+
+
+CodeGenerator* CodeGeneratorScope::top_ = NULL;
+
+
+DeferredCode::DeferredCode()
+ : masm_(CodeGeneratorScope::Current()->masm()),
+ statement_position_(masm_->current_statement_position()),
+ position_(masm_->current_position()) {
ASSERT(statement_position_ != RelocInfo::kNoPosition);
ASSERT(position_ != RelocInfo::kNoPosition);
+
+ CodeGeneratorScope::Current()->AddDeferred(this);
#ifdef DEBUG
comment_ = "";
#endif
-}
-
-void CodeGenerator::ClearDeferred() {
- for (int i = 0; i < deferred_.length(); i++) {
- deferred_[i]->Clear();
+ // Copy the register locations from the code generator's frame.
+ // These are the registers that will be spilled on entry to the
+ // deferred code and restored on exit.
+ VirtualFrame* frame = CodeGeneratorScope::Current()->frame();
+ int sp_offset = frame->fp_relative(frame->stack_pointer_);
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ int loc = frame->register_location(i);
+ if (loc == VirtualFrame::kIllegalIndex) {
+ registers_[i] = kIgnore;
+ } else if (frame->elements_[loc].is_synced()) {
+ // Needs to be restored on exit but not saved on entry.
+ registers_[i] = frame->fp_relative(loc) | kSyncedFlag;
+ } else {
+ int offset = frame->fp_relative(loc);
+ registers_[i] = (offset < sp_offset) ? kPush : offset;
+ }
}
}
@@ -66,17 +80,19 @@ void CodeGenerator::ClearDeferred() {
void CodeGenerator::ProcessDeferred() {
while (!deferred_.is_empty()) {
DeferredCode* code = deferred_.RemoveLast();
- MacroAssembler* masm = code->masm();
+ ASSERT(masm_ == code->masm());
// Record position of deferred code stub.
- masm->RecordStatementPosition(code->statement_position());
+ masm_->RecordStatementPosition(code->statement_position());
if (code->position() != RelocInfo::kNoPosition) {
- masm->RecordPosition(code->position());
+ masm_->RecordPosition(code->position());
}
// Generate the code.
- Comment cmnt(masm, code->comment());
+ Comment cmnt(masm_, code->comment());
+ masm_->bind(code->entry_label());
+ code->SaveRegisters();
code->Generate();
- ASSERT(code->enter()->is_bound());
- code->Clear();
+ code->RestoreRegisters();
+ masm_->jmp(code->exit_label());
}
}
@@ -104,7 +120,6 @@ void CodeGenerator::SetFrame(VirtualFrame* new_frame,
void CodeGenerator::DeleteFrame() {
if (has_valid_frame()) {
frame_->DetachFromCodeGenerator();
- delete frame_;
frame_ = NULL;
}
}
@@ -155,17 +170,21 @@ Handle<Code> CodeGenerator::MakeCode(FunctionLiteral* flit,
// Generate code.
const int initial_buffer_size = 4 * KB;
CodeGenerator cgen(initial_buffer_size, script, is_eval);
+ CodeGeneratorScope scope(&cgen);
cgen.GenCode(flit);
if (cgen.HasStackOverflow()) {
ASSERT(!Top::has_pending_exception());
return Handle<Code>::null();
}
- // Allocate and install the code.
+ // Allocate and install the code. Time the rest of this function as
+ // code creation.
+ HistogramTimerScope timer(&Counters::code_creation);
CodeDesc desc;
cgen.masm()->GetCode(&desc);
- ScopeInfo<> sinfo(flit->scope());
- Code::Flags flags = Code::ComputeFlags(Code::FUNCTION);
+ ZoneScopeInfo sinfo(flit->scope());
+ InLoopFlag in_loop = (cgen.loop_nesting() != 0) ? IN_LOOP : NOT_IN_LOOP;
+ Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, in_loop);
Handle<Code> code = Factory::NewCode(desc,
&sinfo,
flags,
@@ -206,7 +225,7 @@ Handle<Code> CodeGenerator::MakeCode(FunctionLiteral* flit,
bool CodeGenerator::ShouldGenerateLog(Expression* type) {
ASSERT(type != NULL);
- if (!Logger::is_enabled()) return false;
+ if (!Logger::IsEnabled()) return false;
Handle<String> name = Handle<String>::cast(type->AsLiteral()->handle());
if (FLAG_log_regexp) {
static Vector<const char> kRegexp = CStrVector("regexp");
@@ -317,17 +336,18 @@ Handle<JSFunction> CodeGenerator::BuildBoilerplate(FunctionLiteral* node) {
}
-Handle<Code> CodeGenerator::ComputeCallInitialize(int argc) {
- CALL_HEAP_FUNCTION(StubCache::ComputeCallInitialize(argc), Code);
-}
-
-
-Handle<Code> CodeGenerator::ComputeCallInitializeInLoop(int argc) {
- // Force the creation of the corresponding stub outside loops,
- // because it will be used when clearing the ICs later - when we
- // don't know if we're inside a loop or not.
- ComputeCallInitialize(argc);
- CALL_HEAP_FUNCTION(StubCache::ComputeCallInitializeInLoop(argc), Code);
+Handle<Code> CodeGenerator::ComputeCallInitialize(
+ int argc,
+ InLoopFlag in_loop) {
+ if (in_loop == IN_LOOP) {
+ // Force the creation of the corresponding stub outside loops,
+ // because it may be used when clearing the ICs later - it is
+ // possible for a series of IC transitions to lose the in-loop
+ // information, and the IC clearing code can't generate a stub
+ // that it needs, so we ensure it is generated in advance.
+ ComputeCallInitialize(argc, NOT_IN_LOOP);
+ }
+ CALL_HEAP_FUNCTION(StubCache::ComputeCallInitialize(argc, in_loop), Code);
}
@@ -507,8 +527,8 @@ void CodeGenerator::GenerateFastCaseSwitchCases(
// frame. Otherwise, we have to merge the existing one to the
// start frame as part of the previous case.
if (!has_valid_frame()) {
- RegisterFile non_frame_registers = RegisterAllocator::Reserved();
- SetFrame(new VirtualFrame(start_frame), &non_frame_registers);
+ RegisterFile empty;
+ SetFrame(new VirtualFrame(start_frame), &empty);
} else {
frame_->MergeTo(start_frame);
}
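
The new DeferredCode constructor records one int per register: kIgnore, kPush, or an fp-relative offset optionally OR'd with kSyncedFlag. The special values are deliberately not multiples of kPointerSize, so they cannot collide with real offsets, and the flag bit fits in the low zero bits of a valid offset. A standalone decode sketch of that encoding (not V8 code):

    #include <cassert>

    const int kPointerSize = 4;
    const int kIgnore = -1;     // register is not on the frame
    const int kPush = 1;        // above the stack pointer: push on entry
    const int kSyncedFlag = 2;  // already synced to memory: restore only

    enum Action { IGNORE, PUSH, SAVE_AND_RESTORE, RESTORE_ONLY };

    static Action Decode(int value, int* fp_offset) {
      if (value == kIgnore) return IGNORE;
      if (value == kPush) return PUSH;
      *fp_offset = value & ~kSyncedFlag;
      return (value & kSyncedFlag) ? RESTORE_ONLY : SAVE_AND_RESTORE;
    }

    int main() {
      int offset = 0;
      assert(Decode(kIgnore, &offset) == IGNORE);
      assert(Decode(2 * kPointerSize, &offset) == SAVE_AND_RESTORE);
      assert(offset == 8);
      assert(Decode(2 * kPointerSize | kSyncedFlag, &offset) == RESTORE_ONLY);
      assert(offset == 8);
      return 0;
    }
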
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index a6cd693eb..e1758e1a9 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -52,7 +52,6 @@
// CodeGenerator
// ~CodeGenerator
// ProcessDeferred
-// ClearDeferred
// GenCode
// BuildBoilerplate
// ComputeCallInitialize
@@ -86,14 +85,34 @@ enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
#include "arm/codegen-arm.h"
#endif
-namespace v8 { namespace internal {
+#include "register-allocator.h"
+namespace v8 {
+namespace internal {
-// Use lazy compilation; defaults to true.
-// NOTE: Do not remove non-lazy compilation until we can properly
-// install extensions with lazy compilation enabled. At the
-// moment, this doesn't work for the extensions in Google3,
-// and we can only run the tests with --nolazy.
+
+// Code generation can be nested. Code generation scopes form a stack
+// of active code generators.
+class CodeGeneratorScope BASE_EMBEDDED {
+ public:
+ explicit CodeGeneratorScope(CodeGenerator* cgen) {
+ previous_ = top_;
+ top_ = cgen;
+ }
+
+ ~CodeGeneratorScope() {
+ top_ = previous_;
+ }
+
+ static CodeGenerator* Current() {
+ ASSERT(top_ != NULL);
+ return top_;
+ }
+
+ private:
+ static CodeGenerator* top_;
+ CodeGenerator* previous_;
+};
// Deferred code objects are small pieces of code that are compiled
@@ -101,52 +120,56 @@ namespace v8 { namespace internal {
// paths thereby avoiding expensive jumps around uncommon code parts.
class DeferredCode: public ZoneObject {
public:
- explicit DeferredCode(CodeGenerator* generator);
+ DeferredCode();
virtual ~DeferredCode() { }
virtual void Generate() = 0;
- // Unuse the entry and exit targets, deallocating all virtual frames
- // held by them. It will be impossible to emit a (correct) jump
- // into or out of the deferred code after clearing.
- void Clear() {
- enter_.Unuse();
- exit_.Unuse();
- }
-
- MacroAssembler* masm() const { return masm_; }
- CodeGenerator* generator() const { return generator_; }
-
- JumpTarget* enter() { return &enter_; }
- void BindExit() { exit_.Bind(0); }
- void BindExit(Result* result) { exit_.Bind(result, 1); }
- void BindExit(Result* result0, Result* result1) {
- exit_.Bind(result0, result1, 2);
- }
- void BindExit(Result* result0, Result* result1, Result* result2) {
- exit_.Bind(result0, result1, result2, 3);
- }
+ MacroAssembler* masm() { return masm_; }
int statement_position() const { return statement_position_; }
int position() const { return position_; }
+ Label* entry_label() { return &entry_label_; }
+ Label* exit_label() { return &exit_label_; }
+
#ifdef DEBUG
void set_comment(const char* comment) { comment_ = comment; }
const char* comment() const { return comment_; }
#else
- inline void set_comment(const char* comment) { }
+ void set_comment(const char* comment) { }
const char* comment() const { return ""; }
#endif
+ inline void Jump();
+ inline void Branch(Condition cc);
+ void BindExit() { masm_->bind(&exit_label_); }
+
+ void SaveRegisters();
+ void RestoreRegisters();
+
protected:
- CodeGenerator* const generator_;
- MacroAssembler* const masm_;
- JumpTarget enter_;
- JumpTarget exit_;
+ MacroAssembler* masm_;
private:
+ // Constants indicating special actions. They should not be multiples
+ // of kPointerSize so they will not collide with valid offsets from
+ // the frame pointer.
+ static const int kIgnore = -1;
+ static const int kPush = 1;
+
+ // This flag is ored with a valid offset from the frame pointer, so
+ // it should fit in the low zero bits of a valid offset.
+ static const int kSyncedFlag = 2;
+
int statement_position_;
int position_;
+
+ Label entry_label_;
+ Label exit_label_;
+
+ int registers_[RegisterAllocator::kNumRegisters];
+
#ifdef DEBUG
const char* comment_;
#endif
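
CodeGeneratorScope replaces the cgen_ back-pointers previously threaded through VirtualFrame and DeferredCode with a static stack of "current" generators maintained by scope objects. A generic sketch of the pattern (CurrentScope is an illustrative name, not a V8 type):

    #include <cassert>

    template <typename T>
    class CurrentScope {
     public:
      explicit CurrentScope(T* value) : previous_(top_) { top_ = value; }
      ~CurrentScope() { top_ = previous_; }
      static T* Current() {
        assert(top_ != 0);  // must be inside some scope
        return top_;
      }
     private:
      static T* top_;
      T* previous_;
    };

    struct CodeGen { int id; };
    template <> CodeGen* CurrentScope<CodeGen>::top_ = 0;

    int main() {
      CodeGen outer = {1}, inner = {2};
      CurrentScope<CodeGen> s1(&outer);
      {
        CurrentScope<CodeGen> s2(&inner);  // nesting, as in the patch
        assert(CurrentScope<CodeGen>::Current()->id == 2);
      }
      assert(CurrentScope<CodeGen>::Current()->id == 1);
      return 0;
    }
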
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index 4c02d86ce..421b6766f 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -29,15 +29,30 @@
#include "compilation-cache.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
enum {
- NUMBER_OF_ENTRY_KINDS = CompilationCache::LAST_ENTRY + 1
+ // The number of script generations tells how many GCs a script can
+ // survive in the compilation cache before being flushed if it
+ // hasn't been used.
+ NUMBER_OF_SCRIPT_GENERATIONS = 5,
+
+ // The compilation cache consists of tables - one for each entry
+ // kind plus extras for the script generations.
+ NUMBER_OF_TABLE_ENTRIES =
+ CompilationCache::LAST_ENTRY + NUMBER_OF_SCRIPT_GENERATIONS
};
+// Current enable state of the compilation cache.
+static bool enabled = true;
+static inline bool IsEnabled() {
+ return FLAG_compilation_cache && enabled;
+}
+
// Keep separate tables for the different entry kinds.
-static Object* tables[NUMBER_OF_ENTRY_KINDS] = { 0, };
+static Object* tables[NUMBER_OF_TABLE_ENTRIES] = { 0, };
static Handle<CompilationCacheTable> AllocateTable(int size) {
@@ -46,14 +61,15 @@ static Handle<CompilationCacheTable> AllocateTable(int size) {
}
-static Handle<CompilationCacheTable> GetTable(CompilationCache::Entry entry) {
+static Handle<CompilationCacheTable> GetTable(int index) {
+ ASSERT(index >= 0 && index < NUMBER_OF_TABLE_ENTRIES);
Handle<CompilationCacheTable> result;
- if (tables[entry]->IsUndefined()) {
+ if (tables[index]->IsUndefined()) {
static const int kInitialCacheSize = 64;
result = AllocateTable(kInitialCacheSize);
- tables[entry] = *result;
+ tables[index] = *result;
} else {
- CompilationCacheTable* table = CompilationCacheTable::cast(tables[entry]);
+ CompilationCacheTable* table = CompilationCacheTable::cast(tables[index]);
result = Handle<CompilationCacheTable>(table);
}
return result;
@@ -121,47 +137,80 @@ static bool HasOrigin(Handle<JSFunction> boilerplate,
}
-static Handle<JSFunction> Lookup(Handle<String> source,
- CompilationCache::Entry entry) {
- // Make sure not to leak the table into the surrounding handle
- // scope. Otherwise, we risk keeping old tables around even after
- // having cleared the cache.
- Object* result;
- { HandleScope scope;
- Handle<CompilationCacheTable> table = GetTable(entry);
- result = table->Lookup(*source);
- }
- if (result->IsJSFunction()) {
- return Handle<JSFunction>(JSFunction::cast(result));
- } else {
- return Handle<JSFunction>::null();
- }
-}
-
-
-// TODO(245): Need to allow identical code from different contexts to be
-// cached. Currently the first use will be cached, but subsequent code
-// from different source / line won't.
+// TODO(245): Need to allow identical code from different contexts to
+// be cached in the same script generation. Currently the first use
+// will be cached, but subsequent code from different source / line
+// won't.
Handle<JSFunction> CompilationCache::LookupScript(Handle<String> source,
Handle<Object> name,
int line_offset,
int column_offset) {
- Handle<JSFunction> result = Lookup(source, SCRIPT);
- if (result.is_null()) {
- Counters::compilation_cache_misses.Increment();
- } else if (HasOrigin(result, name, line_offset, column_offset)) {
+ if (!IsEnabled()) {
+ return Handle<JSFunction>::null();
+ }
+
+ // Use an int for the generation index, so value range propagation
+ // in gcc 4.3+ won't assume it can only go up to LAST_ENTRY when in
+ // fact it can go up to SCRIPT + NUMBER_OF_SCRIPT_GENERATIONS.
+ int generation = SCRIPT;
+ Object* result = NULL;
+
+ // Probe the script generation tables. Make sure not to leak handles
+ // into the caller's handle scope.
+ { HandleScope scope;
+ while (generation < SCRIPT + NUMBER_OF_SCRIPT_GENERATIONS) {
+ Handle<CompilationCacheTable> table = GetTable(generation);
+ Handle<Object> probe(table->Lookup(*source));
+ if (probe->IsJSFunction()) {
+ Handle<JSFunction> boilerplate = Handle<JSFunction>::cast(probe);
+ // Break when we've found a suitable boilerplate function that
+ // matches the origin.
+ if (HasOrigin(boilerplate, name, line_offset, column_offset)) {
+ result = *boilerplate;
+ break;
+ }
+ }
+ // Go to the next generation.
+ generation++;
+ }
+ }
+
+ static void* script_histogram = StatsTable::CreateHistogram(
+ "V8.ScriptCache",
+ 0,
+ NUMBER_OF_SCRIPT_GENERATIONS,
+ NUMBER_OF_SCRIPT_GENERATIONS + 1);
+
+ if (script_histogram != NULL) {
+ // The level NUMBER_OF_SCRIPT_GENERATIONS is equivalent to a cache miss.
+ StatsTable::AddHistogramSample(script_histogram, generation - SCRIPT);
+ }
+
+ // Once outside the manacles of the handle scope, we need to recheck
+ // to see if we actually found a cached script. If so, we return a
+ // handle created in the caller's handle scope.
+ if (result != NULL) {
+ Handle<JSFunction> boilerplate(JSFunction::cast(result));
+ ASSERT(HasOrigin(boilerplate, name, line_offset, column_offset));
+ // If the script was found in a later generation, we promote it to
+ // the first generation to let it survive longer in the cache.
+ if (generation != SCRIPT) PutScript(source, boilerplate);
Counters::compilation_cache_hits.Increment();
+ return boilerplate;
} else {
- result = Handle<JSFunction>::null();
Counters::compilation_cache_misses.Increment();
+ return Handle<JSFunction>::null();
}
- return result;
}
Handle<JSFunction> CompilationCache::LookupEval(Handle<String> source,
Handle<Context> context,
Entry entry) {
+ if (!IsEnabled()) {
+ return Handle<JSFunction>::null();
+ }
+
ASSERT(entry == EVAL_GLOBAL || entry == EVAL_CONTEXTUAL);
Handle<JSFunction> result = Lookup(source, context, entry);
if (result.is_null()) {
@@ -175,6 +224,10 @@ Handle<JSFunction> CompilationCache::LookupEval(Handle<String> source,
Handle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
JSRegExp::Flags flags) {
+ if (!IsEnabled()) {
+ return Handle<FixedArray>::null();
+ }
+
Handle<FixedArray> result = Lookup(source, flags);
if (result.is_null()) {
Counters::compilation_cache_misses.Increment();
@@ -187,6 +240,10 @@ Handle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
void CompilationCache::PutScript(Handle<String> source,
Handle<JSFunction> boilerplate) {
+ if (!IsEnabled()) {
+ return;
+ }
+
HandleScope scope;
ASSERT(boilerplate->IsBoilerplate());
Handle<CompilationCacheTable> table = GetTable(SCRIPT);
@@ -198,6 +255,10 @@ void CompilationCache::PutEval(Handle<String> source,
Handle<Context> context,
Entry entry,
Handle<JSFunction> boilerplate) {
+ if (!IsEnabled()) {
+ return;
+ }
+
HandleScope scope;
ASSERT(boilerplate->IsBoilerplate());
Handle<CompilationCacheTable> table = GetTable(entry);
@@ -209,6 +270,10 @@ void CompilationCache::PutEval(Handle<String> source,
void CompilationCache::PutRegExp(Handle<String> source,
JSRegExp::Flags flags,
Handle<FixedArray> data) {
+ if (!IsEnabled()) {
+ return;
+ }
+
HandleScope scope;
Handle<CompilationCacheTable> table = GetTable(REGEXP);
CALL_HEAP_FUNCTION_VOID(table->PutRegExp(*source, flags, *data));
@@ -216,14 +281,36 @@ void CompilationCache::PutRegExp(Handle<String> source,
void CompilationCache::Clear() {
- for (int i = 0; i < NUMBER_OF_ENTRY_KINDS; i++) {
+ for (int i = 0; i < NUMBER_OF_TABLE_ENTRIES; i++) {
tables[i] = Heap::undefined_value();
}
}
void CompilationCache::Iterate(ObjectVisitor* v) {
- v->VisitPointers(&tables[0], &tables[NUMBER_OF_ENTRY_KINDS]);
+ v->VisitPointers(&tables[0], &tables[NUMBER_OF_TABLE_ENTRIES]);
+}
+
+
+void CompilationCache::MarkCompactPrologue() {
+ ASSERT(LAST_ENTRY == SCRIPT);
+ for (int i = NUMBER_OF_TABLE_ENTRIES - 1; i > SCRIPT; i--) {
+ tables[i] = tables[i - 1];
+ }
+ for (int j = 0; j <= LAST_ENTRY; j++) {
+ tables[j] = Heap::undefined_value();
+ }
+}
+
+
+void CompilationCache::Enable() {
+ enabled = true;
+}
+
+
+void CompilationCache::Disable() {
+ enabled = false;
+ Clear();
}
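
The compilation cache now keeps NUMBER_OF_SCRIPT_GENERATIONS script tables: MarkCompactPrologue ages every entry by one generation, and a hit in an older generation is promoted back to the youngest so it survives longer. A standalone sketch of just the script path (std::map stands in for CompilationCacheTable, int for the boilerplate function):

    #include <cassert>
    #include <map>
    #include <string>

    const int kGenerations = 5;  // NUMBER_OF_SCRIPT_GENERATIONS in the patch

    // One table per generation; generation 0 is the youngest.
    static std::map<std::string, int> tables[kGenerations];

    static void Put(const std::string& source, int boilerplate) {
      tables[0][source] = boilerplate;
    }

    // Probe every generation; on a hit in an older one, promote the entry
    // to generation 0, as LookupScript re-puts it.
    static bool Lookup(const std::string& source, int* boilerplate) {
      for (int g = 0; g < kGenerations; g++) {
        std::map<std::string, int>::iterator it = tables[g].find(source);
        if (it != tables[g].end()) {
          *boilerplate = it->second;
          if (g != 0) Put(source, it->second);
          return true;
        }
      }
      return false;
    }

    // Before each mark-compact GC: age every table by one generation; the
    // oldest generation falls off and the youngest starts out empty.
    static void MarkCompactPrologue() {
      for (int g = kGenerations - 1; g > 0; g--) tables[g] = tables[g - 1];
      tables[0].clear();
    }

    int main() {
      Put("f()", 42);
      MarkCompactPrologue();  // ages into generation 1
      int result = 0;
      assert(Lookup("f()", &result) && result == 42);  // hit promotes it
      for (int i = 0; i < kGenerations; i++) MarkCompactPrologue();
      assert(!Lookup("f()", &result));  // unused entries get flushed
      return 0;
    }
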
diff --git a/deps/v8/src/compilation-cache.h b/deps/v8/src/compilation-cache.h
index 38a9e3a3a..4545defc5 100644
--- a/deps/v8/src/compilation-cache.h
+++ b/deps/v8/src/compilation-cache.h
@@ -28,7 +28,8 @@
#ifndef V8_COMPILATION_CACHE_H_
#define V8_COMPILATION_CACHE_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// The compilation cache keeps function boilerplates for compiled
@@ -40,11 +41,11 @@ class CompilationCache {
// scripts and evals. Internally, we use separate caches to avoid
// getting the wrong kind of entry when looking up.
enum Entry {
- SCRIPT,
EVAL_GLOBAL,
EVAL_CONTEXTUAL,
REGEXP,
- LAST_ENTRY = REGEXP
+ SCRIPT,
+ LAST_ENTRY = SCRIPT
};
// Finds the script function boilerplate for a source
@@ -93,10 +94,13 @@ class CompilationCache {
// Notify the cache that a mark-sweep garbage collection is about to
// take place. This is used to retire entries from the cache to
- // avoid keeping them alive too long without using them. For now, we
- // just clear the cache but we should consider are more
- // sophisticated LRU scheme.
- static void MarkCompactPrologue() { Clear(); }
+ // avoid keeping them alive too long without using them.
+ static void MarkCompactPrologue();
+
+ // Enable/disable the compilation cache. Used by the debugger to disable the
+ // compilation cache during debugging to make sure new scripts are always
+ // compiled.
+ static void Enable();
+ static void Disable();
};
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 256c69696..ea7c134da 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -37,7 +37,8 @@
#include "scopes.h"
#include "usage-analyzer.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
static Handle<Code> MakeCode(FunctionLiteral* literal,
Handle<Script> script,
@@ -52,12 +53,15 @@ static Handle<Code> MakeCode(FunctionLiteral* literal,
return Handle<Code>::null();
}
- // Compute top scope and allocate variables. For lazy compilation
- // the top scope only contains the single lazily compiled function,
- // so this doesn't re-allocate variables repeatedly.
- Scope* top = literal->scope();
- while (top->outer_scope() != NULL) top = top->outer_scope();
- top->AllocateVariables(context);
+ {
+ // Compute top scope and allocate variables. For lazy compilation
+ // the top scope only contains the single lazily compiled function,
+ // so this doesn't re-allocate variables repeatedly.
+ HistogramTimerScope timer(&Counters::variable_allocation);
+ Scope* top = literal->scope();
+ while (top->outer_scope() != NULL) top = top->outer_scope();
+ top->AllocateVariables(context);
+ }
#ifdef DEBUG
if (Bootstrapper::IsActive() ?
@@ -86,7 +90,7 @@ static bool IsValidJSON(FunctionLiteral* lit) {
Statement* stmt = lit->body()->at(0);
if (stmt->AsExpressionStatement() == NULL)
return false;
- Expression *expr = stmt->AsExpressionStatement()->expression();
+ Expression* expr = stmt->AsExpressionStatement()->expression();
return expr->IsValidJSON();
}
@@ -98,7 +102,7 @@ static Handle<JSFunction> MakeFunction(bool is_global,
Handle<Context> context,
v8::Extension* extension,
ScriptDataImpl* pre_data) {
- ZoneScope zone_scope(DELETE_ON_EXIT);
+ CompilationZoneScope zone_scope(DELETE_ON_EXIT);
// Make sure we have an initial stack limit.
StackGuard guard;
@@ -106,7 +110,22 @@ static Handle<JSFunction> MakeFunction(bool is_global,
ASSERT(!i::Top::global_context().is_null());
script->set_context_data((*i::Top::global_context())->data());
+
#ifdef ENABLE_DEBUGGER_SUPPORT
+ if (is_eval || is_json) {
+ script->set_compilation_type(
+ is_json ? Smi::FromInt(Script::COMPILATION_TYPE_JSON) :
+ Smi::FromInt(Script::COMPILATION_TYPE_EVAL));
+ // For eval scripts add information on the function from which eval was
+ // called.
+ if (is_eval) {
+ JavaScriptFrameIterator it;
+ script->set_eval_from_function(it.frame()->function());
+ int offset = it.frame()->pc() - it.frame()->code()->instruction_start();
+ script->set_eval_from_instructions_offset(Smi::FromInt(offset));
+ }
+ }
+
// Notify debugger
Debugger::OnBeforeCompile(script);
#endif
@@ -156,7 +175,7 @@ static Handle<JSFunction> MakeFunction(bool is_global,
#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
// Log the code generation for the script. Check explicitly whether logging
// is enabled to avoid allocating when not required.
- if (Logger::is_enabled() || OProfileAgent::is_enabled()) {
+ if (Logger::IsEnabled() || OProfileAgent::is_enabled()) {
if (script->name()->IsString()) {
SmartPointer<char> data =
String::cast(script->name())->ToCString(DISALLOW_NULLS);
@@ -264,7 +283,6 @@ Handle<JSFunction> Compiler::Compile(Handle<String> source,
Handle<JSFunction> Compiler::CompileEval(Handle<String> source,
Handle<Context> context,
- int line_offset,
bool is_global,
bool is_json) {
int source_length = source->length();
@@ -284,7 +302,6 @@ Handle<JSFunction> Compiler::CompileEval(Handle<String> source,
if (result.is_null()) {
// Create a script object describing the script to be compiled.
Handle<Script> script = Factory::NewScript(source);
- script->set_line_offset(Smi::FromInt(line_offset));
result = MakeFunction(is_global,
true,
is_json,
@@ -303,7 +320,7 @@ Handle<JSFunction> Compiler::CompileEval(Handle<String> source,
bool Compiler::CompileLazy(Handle<SharedFunctionInfo> shared,
int loop_nesting) {
- ZoneScope zone_scope(DELETE_ON_EXIT);
+ CompilationZoneScope zone_scope(DELETE_ON_EXIT);
// The VM is in the COMPILER state until exiting this function.
VMState state(COMPILER);
@@ -355,9 +372,9 @@ bool Compiler::CompileLazy(Handle<SharedFunctionInfo> shared,
// Log the code generation. If source information is available, include the
// script name and line number. Check explicitly whether logging is enabled,
// as finding the line number is not free.
- if (Logger::is_enabled() || OProfileAgent::is_enabled()) {
- Handle<String> func_name(lit->name()->length() > 0 ?
- *lit->name() : shared->inferred_name());
+ if (Logger::IsEnabled() || OProfileAgent::is_enabled()) {
+ Handle<String> func_name(name->length() > 0 ?
+ *name : shared->inferred_name());
if (script->name()->IsString()) {
int line_num = GetScriptLineNumber(script, start_position);
if (line_num > 0) {
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index 8abe130d8..9f02a8d3c 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -28,9 +28,12 @@
#ifndef V8_COMPILER_H_
#define V8_COMPILER_H_
+#include "frame-element.h"
#include "parser.h"
+#include "zone.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// The V8 compiler
//
@@ -59,7 +62,6 @@ class Compiler : public AllStatic {
// Compile a String source within a context for Eval.
static Handle<JSFunction> CompileEval(Handle<String> source,
Handle<Context> context,
- int line_offset,
bool is_global,
bool is_json);
@@ -69,6 +71,22 @@ class Compiler : public AllStatic {
static bool CompileLazy(Handle<SharedFunctionInfo> shared, int loop_nesting);
};
+
+// During compilation we need a global list of handles to constants
+// for frame elements. When the zone gets deleted, we make sure to
+// clear this list of handles as well.
+class CompilationZoneScope : public ZoneScope {
+ public:
+ explicit CompilationZoneScope(ZoneScopeMode mode) : ZoneScope(mode) { }
+ virtual ~CompilationZoneScope() {
+ if (ShouldDeleteOnExit()) {
+ FrameElement::ClearConstantList();
+ Result::ClearConstantList();
+ }
+ }
+};
+
+
} } // namespace v8::internal
#endif // V8_COMPILER_H_
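The CompilationZoneScope added above clears the global constant lists only when ShouldDeleteOnExit() is true, so nested scopes opened in a mode that keeps the zone alive leave the handles intact for the enclosing compilation. A sketch of the intended usage, mirroring the MakeFunction and CompileLazy changes earlier in this diff (illustrative only; the function name is hypothetical):

    void CompileWithZone() {
      // One scope per top-level compilation; parser and code generator
      // allocations go into the zone, and frame elements and results may
      // register constant handles in the global lists meanwhile.
      CompilationZoneScope zone_scope(DELETE_ON_EXIT);
      // ... parse and generate code ...
    }  // Zone deleted; FrameElement and Result constant lists cleared.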
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index 36b54881a..873c23ca5 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -31,7 +31,8 @@
#include "debug.h"
#include "scopeinfo.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
JSBuiltinsObject* Context::builtins() {
GlobalObject* object = global();
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index f56143175..bdfc40b04 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -28,7 +28,8 @@
#ifndef V8_CONTEXTS_H_
#define V8_CONTEXTS_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
enum ContextLookupFlags {
@@ -90,6 +91,8 @@ enum ContextLookupFlags {
V(FUNCTION_CACHE_INDEX, JSObject, function_cache) \
V(RUNTIME_CONTEXT_INDEX, Context, runtime_context) \
V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate) \
+ V(CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, JSFunction, \
+ call_as_constructor_delegate) \
V(EMPTY_SCRIPT_INDEX, Script, empty_script) \
V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \
V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
@@ -209,6 +212,7 @@ class Context: public FixedArray {
FUNCTION_CACHE_INDEX,
RUNTIME_CONTEXT_INDEX,
CALL_AS_FUNCTION_DELEGATE_INDEX,
+ CALL_AS_CONSTRUCTOR_DELEGATE_INDEX,
EMPTY_SCRIPT_INDEX,
SCRIPT_FUNCTION_INDEX,
CONTEXT_EXTENSION_FUNCTION_INDEX,
diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h
index 64e4a796e..8c875d75b 100644
--- a/deps/v8/src/conversions-inl.h
+++ b/deps/v8/src/conversions-inl.h
@@ -38,7 +38,8 @@
#include "conversions.h"
#include "platform.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// The fast double-to-int conversion routine does not guarantee
// rounding towards zero.
diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc
index 57a45688f..7f63d9b33 100644
--- a/deps/v8/src/conversions.cc
+++ b/deps/v8/src/conversions.cc
@@ -33,7 +33,8 @@
#include "factory.h"
#include "scanner.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
int HexValue(uc32 c) {
if ('0' <= c && c <= '9')
diff --git a/deps/v8/src/conversions.h b/deps/v8/src/conversions.h
index 605327db7..b6589cb5c 100644
--- a/deps/v8/src/conversions.h
+++ b/deps/v8/src/conversions.h
@@ -28,7 +28,8 @@
#ifndef V8_CONVERSIONS_H_
#define V8_CONVERSIONS_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// The fast double-to-int conversion routine does not guarantee
// rounding towards zero.
diff --git a/deps/v8/src/counters.cc b/deps/v8/src/counters.cc
index bf9b8d8b0..239a5f7a0 100644
--- a/deps/v8/src/counters.cc
+++ b/deps/v8/src/counters.cc
@@ -30,7 +30,8 @@
#include "counters.h"
#include "platform.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
CounterLookupCallback StatsTable::lookup_function_ = NULL;
CreateHistogramCallback StatsTable::create_histogram_function_ = NULL;
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index df1c70a91..5f4dca927 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -28,7 +28,8 @@
#ifndef V8_COUNTERS_H_
#define V8_COUNTERS_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// StatsCounters is an interface for plugging into external
// counters for monitoring. Counters can be looked up and
@@ -74,7 +75,7 @@ class StatsTable : public AllStatic {
// function. min and max define the expected minimum and maximum
// sample values. buckets is the maximum number of buckets
// that the samples will be grouped into.
- static void *CreateHistogram(const char* name,
+ static void* CreateHistogram(const char* name,
int min,
int max,
size_t buckets) {
diff --git a/deps/v8/src/cpu.h b/deps/v8/src/cpu.h
index d12c30c8c..ddc402f7d 100644
--- a/deps/v8/src/cpu.h
+++ b/deps/v8/src/cpu.h
@@ -36,7 +36,8 @@
#ifndef V8_CPU_H_
#define V8_CPU_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// ----------------------------------------------------------------------------
// CPU
diff --git a/deps/v8/src/d8-posix.cc b/deps/v8/src/d8-posix.cc
index c2dc5311a..3a091f93c 100644
--- a/deps/v8/src/d8-posix.cc
+++ b/deps/v8/src/d8-posix.cc
@@ -280,7 +280,10 @@ static void ExecSubprocess(int* exec_error_fds,
// Only get here if the exec failed. Write errno to the parent to tell
// them it went wrong. If it went well the pipe is closed.
int err = errno;
- write(exec_error_fds[kWriteFD], &err, sizeof(err));
+ int bytes_written;
+ do {
+ bytes_written = write(exec_error_fds[kWriteFD], &err, sizeof(err));
+ } while (bytes_written == -1 && errno == EINTR);
// Return (and exit child process).
}
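The retry loop above is the standard POSIX EINTR pattern: write() may fail with errno == EINTR when a signal arrives before any data is transferred, in which case the call should simply be reissued. A self-contained helper capturing the same idea might look like this (a sketch, not part of the patch; the name is hypothetical, and a short write is still returned to the caller unchanged):

    #include <errno.h>
    #include <unistd.h>

    // Retry write() until it either succeeds or fails for a reason other
    // than being interrupted by a signal.
    static ssize_t WriteIgnoringInterrupts(int fd, const void* buf, size_t len) {
      ssize_t n;
      do {
        n = write(fd, buf, len);
      } while (n == -1 && errno == EINTR);
      return n;
    }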
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 70143c3c0..ee845ee2c 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -451,7 +451,7 @@ void Shell::Initialize() {
i::Handle<i::JSFunction> script_fun = Utils::OpenHandle(*script);
i::Handle<i::Script> script_object =
i::Handle<i::Script>(i::Script::cast(script_fun->shared()->script()));
- script_object->set_type(i::Smi::FromInt(i::SCRIPT_TYPE_NATIVE));
+ script_object->set_type(i::Smi::FromInt(i::Script::TYPE_NATIVE));
// Create the evaluation context
evaluation_context_ = Context::New(NULL, global_template);
@@ -487,7 +487,7 @@ void Shell::OnExit() {
}
-static char* ReadChars(const char *name, int* size_out) {
+static char* ReadChars(const char* name, int* size_out) {
v8::Unlocker unlocker; // Release the V8 lock while reading files.
FILE* file = i::OS::FOpen(name, "rb");
if (file == NULL) return NULL;
@@ -659,7 +659,7 @@ int Shell::Main(int argc, char* argv[]) {
use_preemption = false;
} else if (strcmp(str, "--preemption-interval") == 0) {
if (i + 1 < argc) {
- char *end = NULL;
+ char* end = NULL;
preemption_interval = strtol(argv[++i], &end, 10); // NOLINT
if (preemption_interval <= 0 || *end != '\0' || errno == ERANGE) {
printf("Invalid value for --preemption-interval '%s'\n", argv[i]);
@@ -687,9 +687,9 @@ int Shell::Main(int argc, char* argv[]) {
i++;
} else if (strcmp(str, "-p") == 0 && i + 1 < argc) {
int size = 0;
- const char *files = ReadChars(argv[++i], &size);
+ const char* files = ReadChars(argv[++i], &size);
if (files == NULL) return 1;
- ShellThread *thread =
+ ShellThread* thread =
new ShellThread(threads.length(),
i::Vector<const char>(files, size));
thread->Start();
@@ -736,7 +736,7 @@ int Shell::Main(int argc, char* argv[]) {
if (run_shell)
RunShell();
for (int i = 0; i < threads.length(); i++) {
- i::Thread *thread = threads[i];
+ i::Thread* thread = threads[i];
thread->Join();
delete thread;
}
diff --git a/deps/v8/src/d8.js b/deps/v8/src/d8.js
index ea2fb4498..a8db9e1d9 100644
--- a/deps/v8/src/d8.js
+++ b/deps/v8/src/d8.js
@@ -93,6 +93,13 @@ Debug.ScriptType = { Native: 0,
Normal: 2 };
+// The different types of script compilations matching enum
+// Script::CompilationType in objects.h.
+Debug.ScriptCompilationType = { Host: 0,
+ Eval: 1,
+ JSON: 2 };
+
+
// Current debug state.
const kNoFrame = -1;
Debug.State = {
@@ -498,9 +505,26 @@ DebugRequest.prototype.stepCommandToJSONRequest_ = function(args) {
DebugRequest.prototype.backtraceCommandToJSONRequest_ = function(args) {
// Build a backtrace request from the text command.
var request = this.createRequest('backtrace');
+
+ // Default is to show top 10 frames.
+ request.arguments = {};
+ request.arguments.fromFrame = 0;
+ request.arguments.toFrame = 10;
+
args = args.split(/\s*[ ]+\s*/g);
- if (args.length == 2) {
- request.arguments = {};
+ if (args.length == 1 && args[0].length > 0) {
+ var frameCount = parseInt(args[0]);
+ if (frameCount > 0) {
+ // Show top frames.
+ request.arguments.fromFrame = 0;
+ request.arguments.toFrame = frameCount;
+ } else {
+ // Show bottom frames.
+ request.arguments.fromFrame = 0;
+ request.arguments.toFrame = -frameCount;
+ request.arguments.bottom = true;
+ }
+ } else if (args.length == 2) {
var fromFrame = parseInt(args[0]);
var toFrame = parseInt(args[1]);
if (isNaN(fromFrame) || fromFrame < 0) {
@@ -513,9 +537,13 @@ DebugRequest.prototype.backtraceCommandToJSONRequest_ = function(args) {
throw new Error('Invalid arguments start frame cannot be larger ' +
'than end frame.');
}
+ // Show frame range.
request.arguments.fromFrame = fromFrame;
request.arguments.toFrame = toFrame + 1;
+ } else if (args.length > 2) {
+ throw new Error('Invalid backtrace arguments.');
}
+
return request.toJSONProtocol();
};
@@ -755,7 +783,7 @@ DebugRequest.prototype.helpCommand_ = function(args) {
print(' break on function: location is #<id>#');
print(' break on script position: location is name:line[:column]');
print('clear <breakpoint #>');
- print('backtrace [from frame #] [to frame #]]');
+ print('backtrace [n] | [-n] | [from to]');
print('frame <frame #>');
print('step [in | next | out| min [step count]]');
print('print <expression>');
@@ -942,7 +970,18 @@ function DebugResponseDetails(response) {
if (body[i].name) {
result += body[i].name;
} else {
- result += '[unnamed] ';
+ if (body[i].compilationType == Debug.ScriptCompilationType.Eval) {
+ result += 'eval from ';
+ var script_value = response.lookup(body[i].evalFromScript.ref);
+ result += ' ' + script_value.field('name');
+ result += ':' + (body[i].evalFromLocation.line + 1);
+ result += ':' + body[i].evalFromLocation.column;
+ } else if (body[i].compilationType ==
+ Debug.ScriptCompilationType.JSON) {
+ result += 'JSON ';
+ } else { // body[i].compilationType == Debug.ScriptCompilationType.Host
+ result += '[unnamed] ';
+ }
}
result += ' (lines: ';
result += body[i].lineCount;
@@ -1105,6 +1144,15 @@ ProtocolValue.prototype.type = function() {
/**
+ * Get a metadata field from a protocol value.
+ * @return {Object} the metadata field value
+ */
+ProtocolValue.prototype.field = function(name) {
+ return this.value_[name];
+};
+
+
+/**
* Check if the value is a primitive value.
* @return {boolean} true if the value is primitive
*/
diff --git a/deps/v8/src/dateparser-inl.h b/deps/v8/src/dateparser-inl.h
index 61a8c72e2..3d4161d19 100644
--- a/deps/v8/src/dateparser-inl.h
+++ b/deps/v8/src/dateparser-inl.h
@@ -28,7 +28,8 @@
#ifndef V8_DATEPARSER_INL_H_
#define V8_DATEPARSER_INL_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
template <typename Char>
bool DateParser::Parse(Vector<Char> str, FixedArray* out) {
diff --git a/deps/v8/src/dateparser.cc b/deps/v8/src/dateparser.cc
index a1ae55df0..1cc9aa169 100644
--- a/deps/v8/src/dateparser.cc
+++ b/deps/v8/src/dateparser.cc
@@ -29,7 +29,8 @@
#include "dateparser.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
bool DateParser::DayComposer::Write(FixedArray* output) {
int year = 0; // Default year is 0 (=> 2000) for KJS compatibility.
diff --git a/deps/v8/src/dateparser.h b/deps/v8/src/dateparser.h
index 04d7e8baf..d339a4fb7 100644
--- a/deps/v8/src/dateparser.h
+++ b/deps/v8/src/dateparser.h
@@ -30,7 +30,8 @@
#include "scanner.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
class DateParser : public AllStatic {
public:
diff --git a/deps/v8/src/debug-agent.cc b/deps/v8/src/debug-agent.cc
index 63f143a79..62cc251ed 100644
--- a/deps/v8/src/debug-agent.cc
+++ b/deps/v8/src/debug-agent.cc
@@ -30,7 +30,8 @@
#include "debug-agent.h"
#ifdef ENABLE_DEBUGGER_SUPPORT
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Public V8 debugger API message handler function. This function just delegates
// to the debugger agent through its data parameter.
diff --git a/deps/v8/src/debug-agent.h b/deps/v8/src/debug-agent.h
index a3c6025cd..04f883f40 100644
--- a/deps/v8/src/debug-agent.h
+++ b/deps/v8/src/debug-agent.h
@@ -32,7 +32,8 @@
#include "../include/v8-debug.h"
#include "platform.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Forward declarations.
class DebuggerAgentSession;
diff --git a/deps/v8/src/debug-delay.js b/deps/v8/src/debug-delay.js
index ff7d6fbd9..0b0501fde 100644
--- a/deps/v8/src/debug-delay.js
+++ b/deps/v8/src/debug-delay.js
@@ -43,7 +43,8 @@ Debug.DebugEvent = { Break: 1,
Exception: 2,
NewFunction: 3,
BeforeCompile: 4,
- AfterCompile: 5 };
+ AfterCompile: 5,
+ ScriptCollected: 6 };
// Types of exceptions that can be broken upon.
Debug.ExceptionBreak = { All : 0,
@@ -61,6 +62,12 @@ Debug.ScriptType = { Native: 0,
Extension: 1,
Normal: 2 };
+// The different types of script compilations matching enum
+// Script::CompilationType in objects.h.
+Debug.ScriptCompilationType = { Host: 0,
+ Eval: 1,
+ JSON: 2 };
+
// The different script break point types.
Debug.ScriptBreakPointType = { ScriptId: 0,
ScriptName: 1 };
@@ -833,7 +840,7 @@ BreakEvent.prototype.toJSONProtocol = function() {
event: "break",
body: { invocationText: this.exec_state_.frame(0).invocationText(),
}
- }
+ };
// Add script related information to the event if available.
var script = this.func().script();
@@ -861,8 +868,7 @@ BreakEvent.prototype.toJSONProtocol = function() {
o.body.breakpoints.push(number);
}
}
-
- return SimpleObjectToJSON_(o);
+ return JSON.stringify(ObjectToProtocolObject_(o));
};
@@ -923,7 +929,7 @@ ExceptionEvent.prototype.toJSONProtocol = function() {
o.event = "exception";
o.body = { uncaught: this.uncaught_,
exception: MakeMirror(this.exception_)
- }
+ };
// Exceptions might happen without any JavaScript frames.
if (this.exec_state_.frameCount() > 0) {
@@ -984,7 +990,8 @@ CompileEvent.prototype.toJSONProtocol = function() {
o.event = "afterCompile";
}
o.body = {};
- o.body.script = MakeScriptObject_(this.script_, true);
+ o.body.script = this.script_;
+ o.setOption('includeSource', true);
return o.toJSONProtocol();
}
@@ -1015,6 +1022,37 @@ NewFunctionEvent.prototype.setBreakPoint = function(p) {
};
+function MakeScriptCollectedEvent(exec_state, id) {
+ return new ScriptCollectedEvent(exec_state, id);
+}
+
+
+function ScriptCollectedEvent(exec_state, id) {
+ this.exec_state_ = exec_state;
+ this.id_ = id;
+}
+
+
+ScriptCollectedEvent.prototype.id = function() {
+ return this.id_;
+};
+
+
+ScriptCollectedEvent.prototype.executionState = function() {
+ return this.exec_state_;
+};
+
+
+ScriptCollectedEvent.prototype.toJSONProtocol = function() {
+ var o = new ProtocolMessage();
+ o.running = true;
+ o.event = "scriptCollected";
+ o.body = {};
+ o.body.script = { id: this.id() };
+ return o.toJSONProtocol();
+};
+
+
function MakeScriptObject_(script, include_source) {
var o = { id: script.id(),
name: script.name(),
@@ -1078,56 +1116,53 @@ ProtocolMessage.prototype.failed = function(message) {
ProtocolMessage.prototype.toJSONProtocol = function() {
// Encode the protocol header.
- var json = '{';
- json += '"seq":' + this.seq;
+ var json = {};
+ json.seq = this.seq;
if (this.request_seq) {
- json += ',"request_seq":' + this.request_seq;
+ json.request_seq = this.request_seq;
}
- json += ',"type":"' + this.type + '"';
+ json.type = this.type;
if (this.event) {
- json += ',"event":' + StringToJSON_(this.event);
+ json.event = this.event;
}
if (this.command) {
- json += ',"command":' + StringToJSON_(this.command);
+ json.command = this.command;
}
if (this.success) {
- json += ',"success":' + this.success;
+ json.success = this.success;
} else {
- json += ',"success":false';
+ json.success = false;
}
if (this.body) {
- json += ',"body":';
// Encode the body part.
+ var bodyJson;
var serializer = MakeMirrorSerializer(true, this.options_);
if (this.body instanceof Mirror) {
- json += serializer.serializeValue(this.body);
+ bodyJson = serializer.serializeValue(this.body);
} else if (this.body instanceof Array) {
- json += '[';
+ bodyJson = [];
for (var i = 0; i < this.body.length; i++) {
- if (i != 0) json += ',';
if (this.body[i] instanceof Mirror) {
- json += serializer.serializeValue(this.body[i]);
+ bodyJson.push(serializer.serializeValue(this.body[i]));
} else {
- json += SimpleObjectToJSON_(this.body[i], serializer);
+ bodyJson.push(ObjectToProtocolObject_(this.body[i], serializer));
}
}
- json += ']';
} else {
- json += SimpleObjectToJSON_(this.body, serializer);
+ bodyJson = ObjectToProtocolObject_(this.body, serializer);
}
- json += ',"refs":';
- json += serializer.serializeReferencedObjects();
+ json.body = bodyJson;
+ json.refs = serializer.serializeReferencedObjects();
}
if (this.message) {
- json += ',"message":' + StringToJSON_(this.message) ;
+ json.message = this.message;
}
if (this.running) {
- json += ',"running":true';
+ json.running = true;
} else {
- json += ',"running":false';
+ json.running = false;
}
- json += '}';
- return json;
+ return JSON.stringify(json);
}
@@ -1142,7 +1177,7 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request)
try {
try {
// Convert the JSON string to an object.
- request = %CompileString('(' + json_request + ')', 0, false)();
+ request = %CompileString('(' + json_request + ')', false)();
// Create an initial response.
response = this.createResponse(request);
@@ -1451,14 +1486,23 @@ DebugCommandProcessor.prototype.backtraceRequest_ = function(request, response)
// Get the range from the arguments.
if (request.arguments) {
- from_index = request.arguments.fromFrame;
- if (from_index < 0) {
- return response.failed('Invalid frame number');
+ if (request.arguments.fromFrame) {
+ from_index = request.arguments.fromFrame;
+ }
+ if (request.arguments.toFrame) {
+ to_index = request.arguments.toFrame;
}
- to_index = request.arguments.toFrame;
- if (to_index < 0) {
+ if (request.arguments.bottom) {
+ var tmp_index = total_frames - from_index;
+ from_index = total_frames - to_index;
+ to_index = tmp_index;
+ }
+ if (from_index < 0 || to_index < 0) {
return response.failed('Invalid frame number');
}
+ if (request.arguments.compactFormat) {
+ response.setOption('compactFormat', true);
+ }
}
// Adjust the index.
@@ -1581,6 +1625,16 @@ DebugCommandProcessor.prototype.lookupRequest_ = function(request, response) {
return response.failed('Argument "handles" missing');
}
+ // Set 'includeSource' option for script lookup.
+ if (!IS_UNDEFINED(request.arguments.includeSource)) {
+ includeSource = %ToBoolean(request.arguments.includeSource);
+ response.setOption('includeSource', includeSource);
+ }
+
+ if (request.arguments.compactFormat) {
+ response.setOption('compactFormat', true);
+ }
+
// Lookup handles.
var mirrors = {};
for (var i = 0; i < handles.length; i++) {
@@ -1677,6 +1731,7 @@ DebugCommandProcessor.prototype.sourceRequest_ = function(request, response) {
DebugCommandProcessor.prototype.scriptsRequest_ = function(request, response) {
var types = ScriptTypeFlag(Debug.ScriptType.Normal);
var includeSource = false;
+ var idsToInclude = null;
if (request.arguments) {
// Pull out arguments.
if (!IS_UNDEFINED(request.arguments.types)) {
@@ -1690,6 +1745,14 @@ DebugCommandProcessor.prototype.scriptsRequest_ = function(request, response) {
includeSource = %ToBoolean(request.arguments.includeSource);
response.setOption('includeSource', includeSource);
}
+
+ if (IS_ARRAY(request.arguments.ids)) {
+ idsToInclude = {};
+ var ids = request.arguments.ids;
+ for (var i = 0; i < ids.length; i++) {
+ idsToInclude[ids[i]] = true;
+ }
+ }
}
// Collect all scripts in the heap.
@@ -1698,6 +1761,9 @@ DebugCommandProcessor.prototype.scriptsRequest_ = function(request, response) {
response.body = [];
for (var i = 0; i < scripts.length; i++) {
+ if (idsToInclude && !idsToInclude[scripts[i].id]) {
+ continue;
+ }
if (types & ScriptTypeFlag(scripts[i].type)) {
response.body.push(MakeMirror(scripts[i]));
}
@@ -1774,97 +1840,82 @@ DebugCommandProcessor.prototype.formatCFrame = function(cframe_value) {
/**
- * Convert an Object to its JSON representation (see http://www.json.org/).
- * This implementation simply runs through all string property names and adds
- * each property to the JSON representation for some predefined types. For type
- * "object" the function calls itself recursively unless the object has the
- * function property "toJSONProtocol" in which case that is used. This is not
- * a general implementation but sufficient for the debugger. Note that circular
- * structures will cause infinite recursion.
- * @param {Object} object The object to format as JSON
+ * Convert an Object to its debugger protocol representation. The representation
+ * may be serialized to a JSON string using JSON.stringify().
+ * This implementation simply runs through all string property names, converts
+ * each property value to a protocol value and adds the property to the result
+ * object. For type "object" the function will be called recursively. Note that
+ * circular structures will cause infinite recursion.
+ * @param {Object} object The object to format as protocol object.
* @param {MirrorSerializer} mirror_serializer The serializer to use if any
* mirror objects are encountered.
- * @return {string} JSON formatted object value
+ * @return {Object} Protocol object value.
*/
-function SimpleObjectToJSON_(object, mirror_serializer) {
- var content = [];
+function ObjectToProtocolObject_(object, mirror_serializer) {
+ var content = {};
for (var key in object) {
// Only consider string keys.
if (typeof key == 'string') {
- var property_value = object[key];
-
// Format the value based on its type.
- var property_value_json;
- switch (typeof property_value) {
- case 'object':
- if (property_value instanceof Mirror) {
- property_value_json = mirror_serializer.serializeValue(property_value);
- } else if (typeof property_value.toJSONProtocol == 'function') {
- property_value_json = property_value.toJSONProtocol(true)
- } else if (IS_ARRAY(property_value)){
- property_value_json = SimpleArrayToJSON_(property_value, mirror_serializer);
- } else {
- property_value_json = SimpleObjectToJSON_(property_value, mirror_serializer);
- }
- break;
-
- case 'boolean':
- property_value_json = BooleanToJSON_(property_value);
- break;
-
- case 'number':
- property_value_json = NumberToJSON_(property_value);
- break;
-
- case 'string':
- property_value_json = StringToJSON_(property_value);
- break;
-
- default:
- property_value_json = null;
- }
-
+ var property_value_json = ValueToProtocolValue_(object[key],
+ mirror_serializer);
// Add the property if relevant.
- if (property_value_json) {
- content.push(StringToJSON_(key) + ':' + property_value_json);
+ if (!IS_UNDEFINED(property_value_json)) {
+ content[key] = property_value_json;
}
}
}
-
- // Make JSON object representation.
- return '{' + content.join(',') + '}';
+
+ return content;
}
+
/**
- * Convert an array to its JSON representation. This is a VERY simple
- * implementation just to support what is needed for the debugger.
- * @param {Array} array The array to format as JSON
+ * Convert an array to its debugger protocol representation. It will convert
+ * each array element to a protocol value.
+ * @param {Array} array The array to format as protocol array.
* @param {MirrorSerializer} mirror_serializer The serializer to use if any
* mirror objects are encountered.
- * @return {string} JSON formatted array value
+ * @return {Array} Protocol array value.
*/
-function SimpleArrayToJSON_(array, mirror_serializer) {
- // Make JSON array representation.
- var json = '[';
+function ArrayToProtocolArray_(array, mirror_serializer) {
+ var json = [];
for (var i = 0; i < array.length; i++) {
- if (i != 0) {
- json += ',';
- }
- var elem = array[i];
- if (elem instanceof Mirror) {
- json += mirror_serializer.serializeValue(elem);
- } else if (IS_OBJECT(elem)) {
- json += SimpleObjectToJSON_(elem);
- } else if (IS_BOOLEAN(elem)) {
- json += BooleanToJSON_(elem);
- } else if (IS_NUMBER(elem)) {
- json += NumberToJSON_(elem);
- } else if (IS_STRING(elem)) {
- json += StringToJSON_(elem);
- } else {
- json += elem;
- }
+ json.push(ValueToProtocolValue_(array[i], mirror_serializer));
+ }
+ return json;
+}
+
+
+/**
+ * Convert a value to its debugger protocol representation.
+ * @param {*} value The value to format as protocol value.
+ * @param {MirrorSerializer} mirror_serializer The serializer to use if any
+ * mirror objects are encountered.
+ * @return {*} Protocol value.
+ */
+function ValueToProtocolValue_(value, mirror_serializer) {
+ // Format the value based on its type.
+ var json;
+ switch (typeof value) {
+ case 'object':
+ if (value instanceof Mirror) {
+ json = mirror_serializer.serializeValue(value);
+ } else if (IS_ARRAY(value)) {
+ json = ArrayToProtocolArray_(value, mirror_serializer);
+ } else {
+ json = ObjectToProtocolObject_(value, mirror_serializer);
+ }
+ break;
+
+ case 'boolean':
+ case 'string':
+ case 'number':
+ json = value;
+ break;
+
+ default:
+ json = null;
}
- json += ']';
return json;
}
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index 8422a6710..0daf5642d 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -31,6 +31,7 @@
#include "arguments.h"
#include "bootstrapper.h"
#include "code-stubs.h"
+#include "compilation-cache.h"
#include "compiler.h"
#include "debug.h"
#include "execution.h"
@@ -43,7 +44,8 @@
#include "../include/v8-debug.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
#ifdef ENABLE_DEBUGGER_SUPPORT
static void PrintLn(v8::Local<v8::Value> value) {
@@ -425,6 +427,7 @@ void BreakLocationIterator::RinfoNext() {
bool Debug::has_break_points_ = false;
+ScriptCache* Debug::script_cache_ = NULL;
DebugInfoListNode* Debug::debug_info_list_ = NULL;
@@ -440,7 +443,7 @@ void Debug::ThreadInit() {
thread_local_.step_into_fp_ = 0;
thread_local_.after_break_target_ = 0;
thread_local_.debugger_entry_ = NULL;
- thread_local_.preemption_pending_ = false;
+ thread_local_.pending_interrupts_ = 0;
}
@@ -486,29 +489,77 @@ Code* Debug::debug_break_return_entry_ = NULL;
Code* Debug::debug_break_return_ = NULL;
-void Debug::HandleWeakDebugInfo(v8::Persistent<v8::Value> obj, void* data) {
- DebugInfoListNode* node = reinterpret_cast<DebugInfoListNode*>(data);
- RemoveDebugInfo(node->debug_info());
-#ifdef DEBUG
- node = Debug::debug_info_list_;
- while (node != NULL) {
- ASSERT(node != reinterpret_cast<DebugInfoListNode*>(data));
- node = node->next();
+void ScriptCache::Add(Handle<Script> script) {
+ // Create an entry in the hash map for the script.
+ int id = Smi::cast(script->id())->value();
+ HashMap::Entry* entry =
+ HashMap::Lookup(reinterpret_cast<void*>(id), Hash(id), true);
+ if (entry->value != NULL) {
+ ASSERT(*script == *reinterpret_cast<Script**>(entry->value));
+ return;
}
-#endif
+
+ // Globalize the script object, make it weak and use the location of the
+ // global handle as the value in the hash map.
+ Handle<Script> script_ =
+ Handle<Script>::cast((GlobalHandles::Create(*script)));
+ GlobalHandles::MakeWeak(reinterpret_cast<Object**>(script_.location()),
+ this, ScriptCache::HandleWeakScript);
+ entry->value = script_.location();
}
-DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info): next_(NULL) {
- // Globalize the request debug info object and make it weak.
- debug_info_ = Handle<DebugInfo>::cast((GlobalHandles::Create(debug_info)));
- GlobalHandles::MakeWeak(reinterpret_cast<Object**>(debug_info_.location()),
- this, Debug::HandleWeakDebugInfo);
+Handle<FixedArray> ScriptCache::GetScripts() {
+ Handle<FixedArray> instances = Factory::NewFixedArray(occupancy());
+ int count = 0;
+ for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) {
+ ASSERT(entry->value != NULL);
+ if (entry->value != NULL) {
+ instances->set(count, *reinterpret_cast<Script**>(entry->value));
+ count++;
+ }
+ }
+ return instances;
}
-DebugInfoListNode::~DebugInfoListNode() {
- GlobalHandles::Destroy(reinterpret_cast<Object**>(debug_info_.location()));
+void ScriptCache::ProcessCollectedScripts() {
+ for (int i = 0; i < collected_scripts_.length(); i++) {
+ Debugger::OnScriptCollected(collected_scripts_[i]);
+ }
+ collected_scripts_.Clear();
+}
+
+
+void ScriptCache::Clear() {
+ // Iterate the script cache to get rid of all the weak handles.
+ for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) {
+ ASSERT(entry != NULL);
+ Object** location = reinterpret_cast<Object**>(entry->value);
+ ASSERT((*location)->IsScript());
+ GlobalHandles::ClearWeakness(location);
+ GlobalHandles::Destroy(location);
+ }
+ // Clear the content of the hash map.
+ HashMap::Clear();
+}
+
+
+void ScriptCache::HandleWeakScript(v8::Persistent<v8::Value> obj, void* data) {
+ ScriptCache* script_cache = reinterpret_cast<ScriptCache*>(data);
+ // Find the location of the global handle.
+ Script** location =
+ reinterpret_cast<Script**>(Utils::OpenHandle(*obj).location());
+ ASSERT((*location)->IsScript());
+
+ // Remove the entry from the cache.
+ int id = Smi::cast((*location)->id())->value();
+ script_cache->Remove(reinterpret_cast<void*>(id), Hash(id));
+ script_cache->collected_scripts_.Add(id);
+
+ // Clear the weak handle.
+ obj.Dispose();
+ obj.Clear();
}
@@ -528,6 +579,32 @@ void Debug::Setup(bool create_heap_objects) {
}
+void Debug::HandleWeakDebugInfo(v8::Persistent<v8::Value> obj, void* data) {
+ DebugInfoListNode* node = reinterpret_cast<DebugInfoListNode*>(data);
+ RemoveDebugInfo(node->debug_info());
+#ifdef DEBUG
+ node = Debug::debug_info_list_;
+ while (node != NULL) {
+ ASSERT(node != reinterpret_cast<DebugInfoListNode*>(data));
+ node = node->next();
+ }
+#endif
+}
+
+
+DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info): next_(NULL) {
+ // Globalize the request debug info object and make it weak.
+ debug_info_ = Handle<DebugInfo>::cast((GlobalHandles::Create(debug_info)));
+ GlobalHandles::MakeWeak(reinterpret_cast<Object**>(debug_info_.location()),
+ this, Debug::HandleWeakDebugInfo);
+}
+
+
+DebugInfoListNode::~DebugInfoListNode() {
+ GlobalHandles::Destroy(reinterpret_cast<Object**>(debug_info_.location()));
+}
+
+
bool Debug::CompileDebuggerScript(int index) {
HandleScope scope;
@@ -575,7 +652,7 @@ bool Debug::CompileDebuggerScript(int index) {
// Mark this script as native and return successfully.
Handle<Script> script(Script::cast(function->shared()->script()));
- script->set_type(Smi::FromInt(SCRIPT_TYPE_NATIVE));
+ script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
return true;
}
@@ -627,6 +704,7 @@ bool Debug::Load() {
// Debugger loaded.
debug_context_ = Handle<Context>::cast(GlobalHandles::Create(*context));
+
return true;
}
@@ -637,6 +715,9 @@ void Debug::Unload() {
return;
}
+ // Clear the script cache.
+ DestroyScriptCache();
+
// Clear debugger context global handle.
GlobalHandles::Destroy(reinterpret_cast<Object**>(debug_context_.location()));
debug_context_ = Handle<Context>();
@@ -646,7 +727,7 @@ void Debug::Unload() {
// Set the flag indicating that preemption happened during debugging.
void Debug::PreemptionWhileInDebugger() {
ASSERT(InDebugger());
- Debug::set_preemption_pending(true);
+ Debug::set_interrupts_pending(PREEMPT);
}
@@ -1414,6 +1495,94 @@ void Debug::ClearMirrorCache() {
}
+// If the given object is an external string, check that the underlying
+// resource is accessible. For other kinds of objects, always return true.
+static bool IsExternalStringValid(Object* str) {
+ if (!str->IsString() || !StringShape(String::cast(str)).IsExternal()) {
+ return true;
+ }
+ if (String::cast(str)->IsAsciiRepresentation()) {
+ return ExternalAsciiString::cast(str)->resource() != NULL;
+ } else if (String::cast(str)->IsTwoByteRepresentation()) {
+ return ExternalTwoByteString::cast(str)->resource() != NULL;
+ } else {
+ return true;
+ }
+}
+
+
+void Debug::CreateScriptCache() {
+ HandleScope scope;
+
+ // Perform two GCs to get rid of all unreferenced scripts. The first GC gets
+ // rid of all the cached script wrappers and the second gets rid of the
+ // scripts which are no longer referenced.
+ Heap::CollectAllGarbage();
+ Heap::CollectAllGarbage();
+
+ ASSERT(script_cache_ == NULL);
+ script_cache_ = new ScriptCache();
+
+ // Scan heap for Script objects.
+ int count = 0;
+ HeapIterator iterator;
+ while (iterator.has_next()) {
+ HeapObject* obj = iterator.next();
+ ASSERT(obj != NULL);
+ if (obj->IsScript() && IsExternalStringValid(Script::cast(obj)->source())) {
+ script_cache_->Add(Handle<Script>(Script::cast(obj)));
+ count++;
+ }
+ }
+}
+
+
+void Debug::DestroyScriptCache() {
+ // Get rid of the script cache if it was created.
+ if (script_cache_ != NULL) {
+ delete script_cache_;
+ script_cache_ = NULL;
+ }
+}
+
+
+void Debug::AddScriptToScriptCache(Handle<Script> script) {
+ if (script_cache_ != NULL) {
+ script_cache_->Add(script);
+ }
+}
+
+
+Handle<FixedArray> Debug::GetLoadedScripts() {
+ // Create and fill the script cache when the loaded scripts are requested for
+ // the first time.
+ if (script_cache_ == NULL) {
+ CreateScriptCache();
+ }
+
+ // If the script cache is not active just return an empty array.
+ ASSERT(script_cache_ != NULL);
+ if (script_cache_ == NULL) {
+ return Factory::NewFixedArray(0);
+ }
+
+ // Perform GC to get unreferenced scripts evicted from the cache before
+ // returning the content.
+ Heap::CollectAllGarbage();
+
+ // Get the scripts from the cache.
+ return script_cache_->GetScripts();
+}
+
+
+void Debug::AfterGarbageCollection() {
+ // Generate events for collected scripts.
+ if (script_cache_ != NULL) {
+ script_cache_->ProcessCollectedScripts();
+ }
+}
+
+
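Debug::AfterGarbageCollection is intended to run from the GC epilogue, after weak-handle callbacks have fired, so the script ids queued by ScriptCache::HandleWeakScript can be turned into scriptCollected events outside of the collector itself. A hedged sketch of the hook (the exact call site would be in the heap's GC epilogue, which this hunk does not show):

    #ifdef ENABLE_DEBUGGER_SUPPORT
      // After weak callbacks have run for this collection cycle:
      Debug::AfterGarbageCollection();  // emits scriptCollected events
    #endif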
Mutex* Debugger::debugger_access_ = OS::CreateMutex();
Handle<Object> Debugger::event_listener_ = Handle<Object>();
Handle<Object> Debugger::event_listener_data_ = Handle<Object>();
@@ -1421,7 +1590,7 @@ bool Debugger::compiling_natives_ = false;
bool Debugger::is_loading_debugger_ = false;
bool Debugger::never_unload_debugger_ = false;
v8::Debug::MessageHandler2 Debugger::message_handler_ = NULL;
-bool Debugger::message_handler_cleared_ = false;
+bool Debugger::debugger_unload_pending_ = false;
v8::Debug::HostDispatchHandler Debugger::host_dispatch_handler_ = NULL;
int Debugger::host_dispatch_micros_ = 100 * 1000;
DebuggerAgent* Debugger::agent_ = NULL;
@@ -1518,6 +1687,21 @@ Handle<Object> Debugger::MakeCompileEvent(Handle<Script> script,
}
+Handle<Object> Debugger::MakeScriptCollectedEvent(int id,
+ bool* caught_exception) {
+ // Create the script collected event object.
+ Handle<Object> exec_state = MakeExecutionState(caught_exception);
+ Handle<Object> id_object = Handle<Smi>(Smi::FromInt(id));
+ const int argc = 2;
+ Object** argv[argc] = { exec_state.location(), id_object.location() };
+
+ return MakeJSObject(CStrVector("MakeScriptCollectedEvent"),
+ argc,
+ argv,
+ caught_exception);
+}
+
+
void Debugger::OnException(Handle<Object> exception, bool uncaught) {
HandleScope scope;
@@ -1624,12 +1808,15 @@ void Debugger::OnBeforeCompile(Handle<Script> script) {
void Debugger::OnAfterCompile(Handle<Script> script, Handle<JSFunction> fun) {
HandleScope scope;
- // No compile events while compiling natives.
- if (compiling_natives()) return;
+ // Add the newly compiled script to the script cache.
+ Debug::AddScriptToScriptCache(script);
// No more to do if not debugging.
if (!IsDebuggerActive()) return;
+ // No compile events while compiling natives.
+ if (compiling_natives()) return;
+
// Store whether in debugger before entering debugger.
bool in_debugger = Debug::InDebugger();
@@ -1708,11 +1895,43 @@ void Debugger::OnNewFunction(Handle<JSFunction> function) {
}
+void Debugger::OnScriptCollected(int id) {
+ HandleScope scope;
+
+ // No more to do if not debugging.
+ if (!IsDebuggerActive()) return;
+ if (!Debugger::EventActive(v8::ScriptCollected)) return;
+
+ // Enter the debugger.
+ EnterDebugger debugger;
+ if (debugger.FailedToEnter()) return;
+
+ // Create the script collected state object.
+ bool caught_exception = false;
+ Handle<Object> event_data = MakeScriptCollectedEvent(id,
+ &caught_exception);
+ // Bail out and don't call debugger if exception.
+ if (caught_exception) {
+ return;
+ }
+
+ // Process debug event.
+ ProcessDebugEvent(v8::ScriptCollected,
+ Handle<JSObject>::cast(event_data),
+ true);
+}
+
+
void Debugger::ProcessDebugEvent(v8::DebugEvent event,
Handle<JSObject> event_data,
bool auto_continue) {
HandleScope scope;
+ // Clear any pending debug break if this is a real break.
+ if (!auto_continue) {
+ Debug::clear_interrupt_pending(DEBUGBREAK);
+ }
+
// Create the execution state.
bool caught_exception = false;
Handle<Object> exec_state = MakeExecutionState(&caught_exception);
@@ -1756,9 +1975,6 @@ void Debugger::ProcessDebugEvent(v8::DebugEvent event,
}
}
}
-
- // Clear the mirror cache.
- Debug::ClearMirrorCache();
}
@@ -1771,8 +1987,8 @@ void Debugger::UnloadDebugger() {
Debug::Unload();
}
- // Clear the flag indicating that the message handler was recently cleared.
- message_handler_cleared_ = false;
+ // Clear the flag indicating that the debugger should be unloaded.
+ debugger_unload_pending_ = false;
}
@@ -1798,6 +2014,9 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
case v8::AfterCompile:
sendEventMessage = true;
break;
+ case v8::ScriptCollected:
+ sendEventMessage = true;
+ break;
case v8::NewFunction:
break;
default:
@@ -1820,7 +2039,12 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
Handle<JSObject>::cast(event_data));
InvokeMessageHandler(message);
}
- if (auto_continue && !HasCommands()) {
+
+ // If auto continue is set, don't make the event cause a break, but process messages
+ // in the queue if any. For script collected events don't even process
+ // messages in the queue as the execution state might not be what is expected
+ // by the client.
+ if ((auto_continue && !HasCommands()) || event == v8::ScriptCollected) {
return;
}
@@ -1956,10 +2180,7 @@ void Debugger::SetEventListener(Handle<Object> callback,
event_listener_data_ = Handle<Object>::cast(GlobalHandles::Create(*data));
}
- // Unload the debugger if event listener cleared.
- if (callback->IsUndefined()) {
- UnloadDebugger();
- }
+ ListenersChanged();
}
@@ -1967,10 +2188,8 @@ void Debugger::SetMessageHandler(v8::Debug::MessageHandler2 handler) {
ScopedLock with(debugger_access_);
message_handler_ = handler;
+ ListenersChanged();
if (handler == NULL) {
- // Indicate that the message handler was recently cleared.
- message_handler_cleared_ = true;
-
// Send an empty command to the debugger if in a break to make JavaScript
// run again if the debugger is closed.
if (Debug::InDebugger()) {
@@ -1980,6 +2199,25 @@ void Debugger::SetMessageHandler(v8::Debug::MessageHandler2 handler) {
}
+void Debugger::ListenersChanged() {
+ if (IsDebuggerActive()) {
+ // Disable the compilation cache when the debugger is active.
+ CompilationCache::Disable();
+ } else {
+ CompilationCache::Enable();
+
+ // Unload the debugger if both the event listener and the message handler are cleared.
+ if (Debug::InDebugger()) {
+ // If we are in the debugger, set the flag to unload the debugger when the last
+ // EnterDebugger on the current stack is destroyed.
+ debugger_unload_pending_ = true;
+ } else {
+ UnloadDebugger();
+ }
+ }
+}
+
+
void Debugger::SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
int period) {
host_dispatch_handler_ = handler;
@@ -2172,7 +2410,14 @@ v8::Handle<v8::String> MessageImpl::GetJSON() const {
v8::Handle<v8::Context> MessageImpl::GetEventContext() const {
- return v8::Utils::ToLocal(Debug::debugger_entry()->GetContext());
+ Handle<Context> context = Debug::debugger_entry()->GetContext();
+ // Top::context() may have been NULL when the "script collected" event occurred.
+ if (*context == NULL) {
+ ASSERT(event_ == v8::ScriptCollected);
+ return v8::Local<v8::Context>();
+ }
+ Handle<Context> global_context(context->global_context());
+ return v8::Utils::ToLocal(global_context);
}
diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h
index 35336cb1d..a1abceda8 100644
--- a/deps/v8/src/debug.h
+++ b/deps/v8/src/debug.h
@@ -33,6 +33,7 @@
#include "debug-agent.h"
#include "execution.h"
#include "factory.h"
+#include "hashmap.h"
#include "platform.h"
#include "string-stream.h"
#include "v8threads.h"
@@ -40,7 +41,8 @@
#ifdef ENABLE_DEBUGGER_SUPPORT
#include "../include/v8-debug.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Forward declarations.
@@ -144,6 +146,42 @@ class BreakLocationIterator {
};
+// Cache of all script objects in the heap. When a script is added a weak handle
+// to it is created and that weak handle is stored in the cache. The weak handle
+// callback takes care of removing the script from the cache. The key used in
+// the cache is the script id.
+class ScriptCache : private HashMap {
+ public:
+ ScriptCache() : HashMap(ScriptMatch), collected_scripts_(10) {}
+ virtual ~ScriptCache() { Clear(); }
+
+ // Add script to the cache.
+ void Add(Handle<Script> script);
+
+ // Return the scripts in the cache.
+ Handle<FixedArray> GetScripts();
+
+ // Generate debugger events for collected scripts.
+ void ProcessCollectedScripts();
+
+ private:
+ // Calculate the hash value from the key (script id).
+ static uint32_t Hash(int key) { return ComputeIntegerHash(key); }
+
+ // Scripts match if their keys (script id) match.
+ static bool ScriptMatch(void* key1, void* key2) { return key1 == key2; }
+
+ // Clear the cache releasing all the weak handles.
+ void Clear();
+
+ // Weak handle callback for scripts in the cache.
+ static void HandleWeakScript(v8::Persistent<v8::Value> obj, void* data);
+
+ // List used during GC to temporarily store ids of collected scripts.
+ List<int> collected_scripts_;
+};
+
+
// Linked list holding debug info objects. The debug info objects are kept as
// weak handles to avoid a debug info object to keep a function alive.
class DebugInfoListNode {
@@ -230,9 +268,6 @@ class Debug {
}
static int break_id() { return thread_local_.break_id_; }
-
-
-
static bool StepInActive() { return thread_local_.step_into_fp_ != 0; }
static void HandleStepIn(Handle<JSFunction> function,
Address fp,
@@ -247,11 +282,19 @@ class Debug {
thread_local_.debugger_entry_ = entry;
}
- static bool preemption_pending() {
- return thread_local_.preemption_pending_;
+ // Check whether any of the specified interrupts are pending.
+ static bool is_interrupt_pending(InterruptFlag what) {
+ return (thread_local_.pending_interrupts_ & what) != 0;
}
- static void set_preemption_pending(bool preemption_pending) {
- thread_local_.preemption_pending_ = preemption_pending;
+
+ // Set specified interrupts as pending.
+ static void set_interrupts_pending(InterruptFlag what) {
+ thread_local_.pending_interrupts_ |= what;
+ }
+
+ // Clear specified interrupts from pending.
+ static void clear_interrupt_pending(InterruptFlag what) {
+ thread_local_.pending_interrupts_ &= ~static_cast<int>(what);
}
// Getter and setter for the disable break state.
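pending_interrupts_ is a plain bitmask over the existing StackGuard InterruptFlag values, so several interrupt kinds can be recorded while inside the debugger and re-raised on exit (see the EnterDebugger destructor changes below). A minimal sketch of the pattern, using only the accessors above (illustrative only):

    // While in the debugger: record the interrupt instead of acting on it.
    Debug::set_interrupts_pending(PREEMPT);

    // When leaving the last debugger entry: re-raise whatever was recorded.
    if (Debug::is_interrupt_pending(PREEMPT)) {
      Debug::clear_interrupt_pending(PREEMPT);
      StackGuard::Preempt();
    }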
@@ -307,6 +350,15 @@ class Debug {
// Mirror cache handling.
static void ClearMirrorCache();
+ // Script cache handling.
+ static void CreateScriptCache();
+ static void DestroyScriptCache();
+ static void AddScriptToScriptCache(Handle<Script> script);
+ static Handle<FixedArray> GetLoadedScripts();
+
+ // Garbage collection notifications.
+ static void AfterGarbageCollection();
+
// Code generation assumptions.
static const int kIa32CallInstructionLength = 5;
static const int kIa32JSReturnSequenceLength = 6;
@@ -343,6 +395,11 @@ class Debug {
// Boolean state indicating whether any break points are set.
static bool has_break_points_;
+
+ // Cache of all scripts in the heap.
+ static ScriptCache* script_cache_;
+
+ // List of active debug info objects.
static DebugInfoListNode* debug_info_list_;
static bool disable_break_;
@@ -382,8 +439,8 @@ class Debug {
// Top debugger entry.
EnterDebugger* debugger_entry_;
- // Preemption happened while debugging.
- bool preemption_pending_;
+ // Pending interrupts scheduled while debugging.
+ int pending_interrupts_;
};
// Storage location for registers when handling debug break calls
@@ -532,12 +589,15 @@ class Debugger {
static Handle<Object> MakeCompileEvent(Handle<Script> script,
bool before,
bool* caught_exception);
+ static Handle<Object> MakeScriptCollectedEvent(int id,
+ bool* caught_exception);
static void OnDebugBreak(Handle<Object> break_points_hit, bool auto_continue);
static void OnException(Handle<Object> exception, bool uncaught);
static void OnBeforeCompile(Handle<Script> script);
static void OnAfterCompile(Handle<Script> script,
Handle<JSFunction> fun);
static void OnNewFunction(Handle<JSFunction> fun);
+ static void OnScriptCollected(int id);
static void ProcessDebugEvent(v8::DebugEvent event,
Handle<JSObject> event_data,
bool auto_continue);
@@ -578,7 +638,7 @@ class Debugger {
ScopedLock with(debugger_access_);
// Check whether the message handler has been cleared.
- if (message_handler_cleared_) {
+ if (debugger_unload_pending_) {
UnloadDebugger();
}
@@ -595,6 +655,7 @@ class Debugger {
private:
static bool IsDebuggerActive();
+ static void ListenersChanged();
static Mutex* debugger_access_; // Mutex guarding debugger variables.
static Handle<Object> event_listener_; // Global handle to listener.
@@ -603,7 +664,7 @@ class Debugger {
static bool is_loading_debugger_; // Are we loading the debugger?
static bool never_unload_debugger_; // Can we unload the debugger?
static v8::Debug::MessageHandler2 message_handler_;
- static bool message_handler_cleared_; // Was message handler cleared?
+ static bool debugger_unload_pending_; // Is a debugger unload pending?
static v8::Debug::HostDispatchHandler host_dispatch_handler_;
static int host_dispatch_micros_;
@@ -626,7 +687,8 @@ class EnterDebugger BASE_EMBEDDED {
EnterDebugger()
: prev_(Debug::debugger_entry()),
has_js_frames_(!it_.done()) {
- ASSERT(prev_ == NULL ? !Debug::preemption_pending() : true);
+ ASSERT(prev_ != NULL || !Debug::is_interrupt_pending(PREEMPT));
+ ASSERT(prev_ != NULL || !Debug::is_interrupt_pending(DEBUGBREAK));
// Link recursive debugger entry.
Debug::set_debugger_entry(this);
@@ -656,22 +718,42 @@ class EnterDebugger BASE_EMBEDDED {
// Restore to the previous break state.
Debug::SetBreak(break_frame_id_, break_id_);
- // Request preemption when leaving the last debugger entry and a preemption
- // had been recorded while debugging. This is to avoid starvation in some
- // debugging scenarios.
- if (prev_ == NULL && Debug::preemption_pending()) {
- StackGuard::Preempt();
- Debug::set_preemption_pending(false);
- }
+ // Check for leaving the debugger.
+ if (prev_ == NULL) {
+ // Clear mirror cache when leaving the debugger. Skip this if there is a
+ // pending exception as clearing the mirror cache calls back into
+ // JavaScript. This can happen if v8::Debug::Call is used, in which
+ // case the exception should end up in the calling code.
+ if (!Top::has_pending_exception()) {
+ // Try to avoid any pending debug break breaking in the clear mirror
+ // cache JavaScript code.
+ if (StackGuard::IsDebugBreak()) {
+ Debug::set_interrupts_pending(DEBUGBREAK);
+ StackGuard::Continue(DEBUGBREAK);
+ }
+ Debug::ClearMirrorCache();
+ }
- // If there are commands in the queue when leaving the debugger request that
- // these commands are processed.
- if (prev_ == NULL && Debugger::HasCommands()) {
- StackGuard::DebugCommand();
- }
+ // Request preemption and debug break when leaving the last debugger entry
+ // if any of these were recorded while debugging.
+ if (Debug::is_interrupt_pending(PREEMPT)) {
+ // This re-scheduling of preemption is to avoid starvation in some
+ // debugging scenarios.
+ Debug::clear_interrupt_pending(PREEMPT);
+ StackGuard::Preempt();
+ }
+ if (Debug::is_interrupt_pending(DEBUGBREAK)) {
+ Debug::clear_interrupt_pending(DEBUGBREAK);
+ StackGuard::DebugBreak();
+ }
- // If leaving the debugger with the debugger no longer active unload it.
- if (prev_ == NULL) {
+ // If there are commands in the queue when leaving the debugger request
+ // that these commands are processed.
+ if (Debugger::HasCommands()) {
+ StackGuard::DebugCommand();
+ }
+
+ // If leaving the debugger with the debugger no longer active unload it.
if (!Debugger::IsDebuggerActive()) {
Debugger::UnloadDebugger();
}
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index a838a08ff..95022d052 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -36,16 +36,18 @@
#include "serialize.h"
#include "string-stream.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
#ifdef ENABLE_DISASSEMBLER
void Disassembler::Dump(FILE* f, byte* begin, byte* end) {
for (byte* pc = begin; pc < end; pc++) {
if (f == NULL) {
- PrintF("%p %4d %02x\n", pc, pc - begin, *pc);
+ PrintF("%" V8PRIxPTR " %4" V8PRIdPTR " %02x\n", pc, pc - begin, *pc);
} else {
- fprintf(f, "%p %4d %02x\n", pc, pc - begin, *pc);
+ fprintf(f, "%" V8PRIxPTR " %4" V8PRIdPTR " %02x\n",
+ reinterpret_cast<uintptr_t>(pc), pc - begin, *pc);
}
}
}
@@ -144,8 +146,8 @@ static int DecodeIt(FILE* f,
// raw pointer embedded in code stream, e.g., jump table
byte* ptr = *reinterpret_cast<byte**>(pc);
OS::SNPrintF(decode_buffer,
- "%08x jump table entry %4d",
- reinterpret_cast<int32_t>(ptr),
+ "%08" V8PRIxPTR " jump table entry %4" V8PRIdPTR,
+ ptr,
ptr - begin);
pc += 4;
} else {
diff --git a/deps/v8/src/disassembler.h b/deps/v8/src/disassembler.h
index 5003c00e2..68a338d18 100644
--- a/deps/v8/src/disassembler.h
+++ b/deps/v8/src/disassembler.h
@@ -28,7 +28,8 @@
#ifndef V8_DISASSEMBLER_H_
#define V8_DISASSEMBLER_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
class Disassembler : public AllStatic {
public:
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index 32dde9e4c..fa3c2ecb2 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -43,7 +43,8 @@
#include "debug.h"
#include "v8threads.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
static Handle<Object> Invoke(bool construct,
@@ -188,6 +189,24 @@ Handle<Object> Execution::GetFunctionDelegate(Handle<Object> object) {
}
+Handle<Object> Execution::GetConstructorDelegate(Handle<Object> object) {
+ ASSERT(!object->IsJSFunction());
+
+ // If you return a function from here, it will be called when an
+ // attempt is made to call the given object as a constructor.
+
+ // Objects created through the API can have an instance-call handler
+ // that should be used when calling the object as a function.
+ if (object->IsHeapObject() &&
+ HeapObject::cast(*object)->map()->has_instance_call_handler()) {
+ return Handle<JSFunction>(
+ Top::global_context()->call_as_constructor_delegate());
+ }
+
+ return Factory::undefined_value();
+}
+
+
// Static state for stack guards.
StackGuard::ThreadLocal StackGuard::thread_local_;
@@ -569,20 +588,7 @@ Object* Execution::DebugBreakHelper() {
return Heap::undefined_value();
}
- // Don't break in system functions. If the current function is
- // either in the builtins object of some context or is in the debug
- // context just return with the debug break stack guard active.
- JavaScriptFrameIterator it;
- JavaScriptFrame* frame = it.frame();
- Object* fun = frame->function();
- if (fun->IsJSFunction()) {
- GlobalObject* global = JSFunction::cast(fun)->context()->global();
- if (global->IsJSBuiltinsObject() || Debug::IsDebugGlobal(global)) {
- return Heap::undefined_value();
- }
- }
-
- // Check for debug command break only.
+ // Collect the break state before clearing the flags.
bool debug_command_only =
StackGuard::IsDebugCommand() && !StackGuard::IsDebugBreak();
@@ -590,11 +596,6 @@ Object* Execution::DebugBreakHelper() {
StackGuard::Continue(DEBUGBREAK);
StackGuard::Continue(DEBUGCOMMAND);
- // If debug command only and already in debugger ignore it.
- if (debug_command_only && Debug::InDebugger()) {
- return Heap::undefined_value();
- }
-
HandleScope scope;
// Enter the debugger. Just continue if we fail to enter the debugger.
EnterDebugger debugger;
@@ -602,7 +603,8 @@ Object* Execution::DebugBreakHelper() {
return Heap::undefined_value();
}
- // Notify the debug event listeners.
+ // Notify the debug event listeners. Indicate auto continue if the break was
+ // a debug command break.
Debugger::OnDebugBreak(Factory::undefined_value(), debug_command_only);
// Return to continue execution.
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index 6f2f68922..8cfdec27f 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -28,7 +28,8 @@
#ifndef V8_EXECUTION_H_
#define V8_EXECUTION_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Flag used to set the interrupt causes.
@@ -129,6 +130,10 @@ class Execution : public AllStatic {
// Get a function delegate (or undefined) for the given non-function
// object. Used to support calling objects as functions.
static Handle<Object> GetFunctionDelegate(Handle<Object> object);
+
+ // Get a function delegate (or undefined) for the given non-function
+ // object. Used to support calling objects as constructors.
+ static Handle<Object> GetConstructorDelegate(Handle<Object> object);
};
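With this delegate, a construct call on a non-function can be routed through the object's instance-call handler instead of failing outright. A hedged sketch of a call site (the real one lives in the construct-call builtins, which are not shown in this hunk; the function name is illustrative):

    void ConstructNonFunction(Handle<Object> callee) {
      ASSERT(!callee->IsJSFunction());
      Handle<Object> delegate = Execution::GetConstructorDelegate(callee);
      if (delegate->IsUndefined()) {
        // No delegate: the object cannot be used as a constructor; the
        // caller reports a "not a constructor" style error here.
      } else {
        // Invoke the delegate in place of the callee; API objects with an
        // instance-call handler are constructed through that handler.
      }
    }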
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index 4b0b7f51f..fad3e9c28 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -33,7 +33,8 @@
#include "factory.h"
#include "macro-assembler.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
Handle<FixedArray> Factory::NewFixedArray(int size, PretenureFlag pretenure) {
@@ -176,9 +177,12 @@ Handle<Script> Factory::NewScript(Handle<String> source) {
script->set_column_offset(Smi::FromInt(0));
script->set_data(Heap::undefined_value());
script->set_context_data(Heap::undefined_value());
- script->set_type(Smi::FromInt(SCRIPT_TYPE_NORMAL));
+ script->set_type(Smi::FromInt(Script::TYPE_NORMAL));
+ script->set_compilation_type(Smi::FromInt(Script::COMPILATION_TYPE_HOST));
script->set_wrapper(*wrapper);
script->set_line_ends(Heap::undefined_value());
+ script->set_eval_from_function(Heap::undefined_value());
+ script->set_eval_from_instructions_offset(Smi::FromInt(0));
return script;
}
@@ -509,8 +513,10 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
}
-Handle<Code> Factory::NewCode(const CodeDesc& desc, ScopeInfo<>* sinfo,
- Code::Flags flags, Handle<Object> self_ref) {
+Handle<Code> Factory::NewCode(const CodeDesc& desc,
+ ZoneScopeInfo* sinfo,
+ Code::Flags flags,
+ Handle<Object> self_ref) {
CALL_HEAP_FUNCTION(Heap::CreateCode(desc, sinfo, flags, self_ref), Code);
}
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index 6ac2706ec..95dbee909 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -29,8 +29,10 @@
#define V8_FACTORY_H_
#include "heap.h"
+#include "zone-inl.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Interface for handle based allocation.
@@ -202,8 +204,10 @@ class Factory : public AllStatic {
Handle<JSFunction> boilerplate,
Handle<Context> context);
- static Handle<Code> NewCode(const CodeDesc& desc, ScopeInfo<>* sinfo,
- Code::Flags flags, Handle<Object> self_reference);
+ static Handle<Code> NewCode(const CodeDesc& desc,
+ ZoneScopeInfo* sinfo,
+ Code::Flags flags,
+ Handle<Object> self_reference);
static Handle<Code> CopyCode(Handle<Code> code);
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index db494532b..13e41e34f 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -133,6 +133,9 @@ DEFINE_bool(strict, false, "strict error checking")
DEFINE_int(min_preparse_length, 1024,
"Minimum length for automatic enable preparsing")
+// compilation-cache.cc
+DEFINE_bool(compilation_cache, true, "enable compilation cache")
+
// debug.cc
DEFINE_bool(remote_debugging, false, "enable remote debugging")
DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response")
@@ -333,6 +336,9 @@ DEFINE_bool(prof, false,
"Log statistical profiling information (implies --log-code).")
DEFINE_bool(prof_auto, true,
"Used with --prof, starts profiling automatically")
+DEFINE_bool(prof_lazy, false,
+ "Used with --prof, only does sampling and logging"
+ " when profiler is active (implies --noprof_auto).")
DEFINE_bool(log_regexp, false, "Log regular expression execution.")
DEFINE_bool(sliding_state_window, false,
"Update sliding state window counters.")
diff --git a/deps/v8/src/flags.cc b/deps/v8/src/flags.cc
index 733d31a23..5df3afd5f 100644
--- a/deps/v8/src/flags.cc
+++ b/deps/v8/src/flags.cc
@@ -35,7 +35,8 @@
#include "string-stream.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Define all of our flags.
#define FLAG_MODE_DEFINE
@@ -86,9 +87,9 @@ struct Flag {
return *reinterpret_cast<const char**>(valptr_);
}
- void set_string_value(const char *value, bool owns_ptr) {
+ void set_string_value(const char* value, bool owns_ptr) {
ASSERT(type_ == TYPE_STRING);
- const char **ptr = reinterpret_cast<const char **>(valptr_);
+ const char** ptr = reinterpret_cast<const char**>(valptr_);
if (owns_ptr_ && *ptr != NULL) DeleteArray(*ptr);
*ptr = value;
owns_ptr_ = owns_ptr;
diff --git a/deps/v8/src/flags.h b/deps/v8/src/flags.h
index e6cbe3c23..a8eca95c2 100644
--- a/deps/v8/src/flags.h
+++ b/deps/v8/src/flags.h
@@ -29,7 +29,8 @@
#include "checks.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Declare all of our flags.
#define FLAG_MODE_DECLARE
diff --git a/deps/v8/src/frame-element.h b/deps/v8/src/frame-element.h
new file mode 100644
index 000000000..d16eb481a
--- /dev/null
+++ b/deps/v8/src/frame-element.h
@@ -0,0 +1,265 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_FRAME_ELEMENT_H_
+#define V8_FRAME_ELEMENT_H_
+
+#include "register-allocator-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// Virtual frame elements
+//
+// The internal elements of the virtual frames. There are several kinds of
+// elements:
+// * Invalid: elements that are uninitialized or not actually part
+// of the virtual frame. They should not be read.
+// * Memory: an element that resides in the actual frame. Its address is
+// given by its position in the virtual frame.
+// * Register: an element that resides in a register.
+// * Constant: an element whose value is known at compile time.
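+//   * Copy: an element whose value is the value of another frame
+//       element, referred to by the index of that element in the frame.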
+
+class FrameElement BASE_EMBEDDED {
+ public:
+ enum SyncFlag {
+ NOT_SYNCED,
+ SYNCED
+ };
+
+ // The default constructor creates an invalid frame element.
+ FrameElement() {
+ value_ = StaticTypeField::encode(StaticType::UNKNOWN_TYPE)
+ | TypeField::encode(INVALID)
+ | CopiedField::encode(false)
+ | SyncedField::encode(false)
+ | DataField::encode(0);
+ }
+
+ // Factory function to construct an invalid frame element.
+ static FrameElement InvalidElement() {
+ FrameElement result;
+ return result;
+ }
+
+ // Factory function to construct an in-memory frame element.
+ static FrameElement MemoryElement() {
+ FrameElement result(MEMORY, no_reg, SYNCED);
+ return result;
+ }
+
+ // Factory function to construct an in-register frame element.
+ static FrameElement RegisterElement(Register reg,
+ SyncFlag is_synced,
+ StaticType static_type = StaticType()) {
+ return FrameElement(REGISTER, reg, is_synced, static_type);
+ }
+
+ // Factory function to construct a frame element whose value is known at
+ // compile time.
+ static FrameElement ConstantElement(Handle<Object> value,
+ SyncFlag is_synced) {
+ FrameElement result(value, is_synced);
+ return result;
+ }
+
+ // Static indirection table for handles to constants. If a frame
+ // element represents a constant, the data contains an index into
+ // this table of handles to the actual constants.
+ typedef ZoneList<Handle<Object> > ZoneObjectList;
+
+ static ZoneObjectList* ConstantList() {
+ static ZoneObjectList list(10);
+ return &list;
+ }
+
+ // Clear the constants indirection table.
+ static void ClearConstantList() {
+ ConstantList()->Clear();
+ }
+
+ bool is_synced() const { return SyncedField::decode(value_); }
+
+ void set_sync() {
+ ASSERT(type() != MEMORY);
+ value_ = value_ | SyncedField::encode(true);
+ }
+
+ void clear_sync() {
+ ASSERT(type() != MEMORY);
+ value_ = value_ & ~SyncedField::mask();
+ }
+
+ bool is_valid() const { return type() != INVALID; }
+ bool is_memory() const { return type() == MEMORY; }
+ bool is_register() const { return type() == REGISTER; }
+ bool is_constant() const { return type() == CONSTANT; }
+ bool is_copy() const { return type() == COPY; }
+
+ bool is_copied() const { return CopiedField::decode(value_); }
+ void set_copied() { value_ = value_ | CopiedField::encode(true); }
+ void clear_copied() { value_ = value_ & ~CopiedField::mask(); }
+
+ Register reg() const {
+ ASSERT(is_register());
+ uint32_t reg = DataField::decode(value_);
+ Register result;
+ result.code_ = reg;
+ return result;
+ }
+
+ Handle<Object> handle() const {
+ ASSERT(is_constant());
+ return ConstantList()->at(DataField::decode(value_));
+ }
+
+ int index() const {
+ ASSERT(is_copy());
+ return DataField::decode(value_);
+ }
+
+ StaticType static_type() {
+ return StaticType(StaticTypeField::decode(value_));
+ }
+
+ void set_static_type(StaticType static_type) {
+ value_ = value_ & ~StaticTypeField::mask();
+ value_ = value_ | StaticTypeField::encode(static_type.static_type_);
+ }
+
+ bool Equals(FrameElement other) {
+ uint32_t masked_difference = (value_ ^ other.value_) & ~CopiedField::mask();
+ if (!masked_difference) {
+ // The elements are equal if they agree exactly except on copied field.
+ return true;
+ } else {
+ // If two constants have the same value, and agree otherwise, return true.
+ return !(masked_difference & ~DataField::mask()) &&
+ is_constant() &&
+ handle().is_identical_to(other.handle());
+ }
+ }
+
+ // Test if two FrameElements refer to the same memory or register location.
+ bool SameLocation(FrameElement* other) {
+ if (type() == other->type()) {
+ if (value_ == other->value_) return true;
+ if (is_constant() && handle().is_identical_to(other->handle())) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // Given a pair of non-null frame element pointers, return one of them
+ // as an entry frame candidate or null if they are incompatible.
+ FrameElement* Combine(FrameElement* other) {
+ // If either is invalid, so is the result.
+ if (!is_valid()) return this;
+ if (!other->is_valid()) return other;
+
+ if (!SameLocation(other)) return NULL;
+ // If either is unsynced, so is the result. The result's static type is
+ // the merge of the static types. It's safe to set it on one of the
+ // frame elements, and harmless too (because we are only going to
+ // merge the reaching frames and will ensure that the types are
+ // coherent, and changing the static type does not emit code).
+ FrameElement* result = is_synced() ? other : this;
+ result->set_static_type(static_type().merge(other->static_type()));
+ return result;
+ }
+
+ private:
+ enum Type {
+ INVALID,
+ MEMORY,
+ REGISTER,
+ CONSTANT,
+ COPY
+ };
+
+ // Used to construct memory and register elements.
+ FrameElement(Type type, Register reg, SyncFlag is_synced) {
+ value_ = StaticTypeField::encode(StaticType::UNKNOWN_TYPE)
+ | TypeField::encode(type)
+ | CopiedField::encode(false)
+ | SyncedField::encode(is_synced != NOT_SYNCED)
+ | DataField::encode(reg.code_ > 0 ? reg.code_ : 0);
+ }
+
+ FrameElement(Type type, Register reg, SyncFlag is_synced, StaticType stype) {
+ value_ = StaticTypeField::encode(stype.static_type_)
+ | TypeField::encode(type)
+ | CopiedField::encode(false)
+ | SyncedField::encode(is_synced != NOT_SYNCED)
+ | DataField::encode(reg.code_ > 0 ? reg.code_ : 0);
+ }
+
+ // Used to construct constant elements.
+ FrameElement(Handle<Object> value, SyncFlag is_synced) {
+ value_ = StaticTypeField::encode(StaticType::TypeOf(*value).static_type_)
+ | TypeField::encode(CONSTANT)
+ | CopiedField::encode(false)
+ | SyncedField::encode(is_synced != NOT_SYNCED)
+ | DataField::encode(ConstantList()->length());
+ ConstantList()->Add(value);
+ }
+
+ Type type() const { return TypeField::decode(value_); }
+ void set_type(Type type) {
+ value_ = value_ & ~TypeField::mask();
+ value_ = value_ | TypeField::encode(type);
+ }
+
+ void set_index(int new_index) {
+ ASSERT(is_copy());
+ value_ = value_ & ~DataField::mask();
+ value_ = value_ | DataField::encode(new_index);
+ }
+
+ void set_reg(Register new_reg) {
+ ASSERT(is_register());
+ value_ = value_ & ~DataField::mask();
+ value_ = value_ | DataField::encode(new_reg.code_);
+ }
+
+ // Encode static type, type, copied, synced and data in one 32-bit integer.
+ uint32_t value_;
+
+ class StaticTypeField: public BitField<StaticType::StaticTypeEnum, 0, 3> {};
+ class TypeField: public BitField<Type, 3, 3> {};
+ class CopiedField: public BitField<uint32_t, 6, 1> {};
+ class SyncedField: public BitField<uint32_t, 7, 1> {};
+ class DataField: public BitField<uint32_t, 8, 32 - 9> {};
+
+ friend class VirtualFrame;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_FRAME_ELEMENT_H_
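
The new FrameElement packs all of its state into the single 32-bit value_ word via BitField helpers. As a rough stand-alone sketch of that encode/decode pattern (the BitField template below is a simplified stand-in for V8's, with the field layout copied from frame-element.h above):

#include <cassert>
#include <cstdint>

// Simplified stand-in for V8's BitField<T, shift, size> helper.
template <typename T, int shift, int size>
struct BitField {
  static constexpr uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t packed) {
    return static_cast<T>((packed & kMask) >> shift);
  }
};

enum Type { INVALID, MEMORY, REGISTER, CONSTANT, COPY };

// Same layout as frame-element.h: 3 bits of static type, 3 bits of
// element type, one copied bit, one synced bit, the rest as data.
typedef BitField<int,      0, 3>      StaticTypeField;
typedef BitField<Type,     3, 3>      TypeField;
typedef BitField<uint32_t, 6, 1>      CopiedField;
typedef BitField<uint32_t, 7, 1>      SyncedField;
typedef BitField<uint32_t, 8, 32 - 9> DataField;

int main() {
  // Encode a synced register element holding register code 2.
  uint32_t value = TypeField::encode(REGISTER)
                 | SyncedField::encode(1)
                 | DataField::encode(2);
  assert(TypeField::decode(value) == REGISTER);
  assert(DataField::decode(value) == 2);
  // Clearing a field, as clear_sync() does: mask it out.
  value = value & ~SyncedField::kMask;
  assert(SyncedField::decode(value) == 0);
  return 0;
}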
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index bf46f6bf7..28be43066 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -38,7 +38,8 @@
#include "arm/frames-arm.h"
#endif
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
inline Address StackHandler::address() const {
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 1eedbf640..dd0ea00c0 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -34,7 +34,8 @@
#include "top.h"
#include "zone-inl.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Iterator that supports traversing the stack handlers of a
// particular frame. Needs to know the top of the handler chain.
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index 8ab4be905..e250609fd 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -28,7 +28,8 @@
#ifndef V8_FRAMES_H_
#define V8_FRAMES_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
typedef uint32_t RegList;
@@ -442,7 +443,8 @@ class ArgumentsAdaptorFrame: public JavaScriptFrame {
// the sentinel as its context, it is an arguments adaptor frame. It
// must be tagged as a small integer to avoid GC issues. Crud.
enum {
- SENTINEL = (1 << kSmiTagSize) | kSmiTag
+ SENTINEL = (1 << kSmiTagSize) | kSmiTag,
+ NON_SENTINEL = ~SENTINEL
};
virtual Type type() const { return ARGUMENTS_ADAPTOR; }
diff --git a/deps/v8/src/func-name-inferrer.cc b/deps/v8/src/func-name-inferrer.cc
index 75f7a9937..2d6a86a6f 100644
--- a/deps/v8/src/func-name-inferrer.cc
+++ b/deps/v8/src/func-name-inferrer.cc
@@ -30,7 +30,8 @@
#include "ast.h"
#include "func-name-inferrer.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
void FuncNameInferrer::PushEnclosingName(Handle<String> name) {
diff --git a/deps/v8/src/func-name-inferrer.h b/deps/v8/src/func-name-inferrer.h
index d8270c364..e88586a44 100644
--- a/deps/v8/src/func-name-inferrer.h
+++ b/deps/v8/src/func-name-inferrer.h
@@ -28,47 +28,53 @@
#ifndef V8_FUNC_NAME_INFERRER_H_
#define V8_FUNC_NAME_INFERRER_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// FuncNameInferrer is a stateful class that is used to perform name
// inference for anonymous functions during static analysis of source code.
// Inference is performed in cases when an anonymous function is assigned
// to a variable or a property (see test-func-name-inference.cc for examples.)
-
+//
// The basic idea is that during AST traversal LHSs of expressions are
// always visited before RHSs. Thus, during visiting the LHS, a name can be
// collected, and during visiting the RHS, a function literal can be collected.
// Inference is performed while leaving the assignment node.
-
class FuncNameInferrer BASE_EMBEDDED {
public:
- FuncNameInferrer() :
- entries_stack_(10),
- names_stack_(5),
- funcs_to_infer_(4),
- dot_(Factory::NewStringFromAscii(CStrVector("."))) {
+ FuncNameInferrer()
+ : entries_stack_(10),
+ names_stack_(5),
+ funcs_to_infer_(4),
+ dot_(Factory::NewStringFromAscii(CStrVector("."))) {
}
+ // Returns whether we have entered name collection state.
bool IsOpen() const { return !entries_stack_.is_empty(); }
+ // Pushes the name of the enclosing function onto the names stack.
void PushEnclosingName(Handle<String> name);
+ // Enters name collection state.
void Enter() {
entries_stack_.Add(names_stack_.length());
}
+ // Pushes an encountered name onto the names stack when in collection state.
void PushName(Handle<String> name) {
if (IsOpen()) {
names_stack_.Add(name);
}
}
+ // Adds a function whose name should be inferred.
void AddFunction(FunctionLiteral* func_to_infer) {
if (IsOpen()) {
funcs_to_infer_.Add(func_to_infer);
}
}
+ // Infers a function name and leaves the name collection state.
void InferAndLeave() {
ASSERT(IsOpen());
if (!funcs_to_infer_.is_empty()) {
@@ -78,13 +84,18 @@ class FuncNameInferrer BASE_EMBEDDED {
}
private:
+ // Constructs a full name in dotted notation from gathered names.
Handle<String> MakeNameFromStack();
+
+ // A helper function for MakeNameFromStack.
Handle<String> MakeNameFromStackHelper(int pos, Handle<String> prev);
+
+ // Performs name inference for added functions.
void InferFunctionsNames();
- List<int> entries_stack_;
- List<Handle<String> > names_stack_;
- List<FunctionLiteral*> funcs_to_infer_;
+ ZoneList<int> entries_stack_;
+ ZoneList<Handle<String> > names_stack_;
+ ZoneList<FunctionLiteral*> funcs_to_infer_;
Handle<String> dot_;
DISALLOW_COPY_AND_ASSIGN(FuncNameInferrer);
@@ -95,15 +106,17 @@ class FuncNameInferrer BASE_EMBEDDED {
// leaving scope.
class ScopedFuncNameInferrer BASE_EMBEDDED {
public:
- explicit ScopedFuncNameInferrer(FuncNameInferrer* inferrer) :
- inferrer_(inferrer),
- is_entered_(false) {}
+ explicit ScopedFuncNameInferrer(FuncNameInferrer* inferrer)
+ : inferrer_(inferrer),
+ is_entered_(false) {}
+
~ScopedFuncNameInferrer() {
if (is_entered_) {
inferrer_->InferAndLeave();
}
}
+ // Puts the wrapped inferrer into name collection state.
void Enter() {
inferrer_->Enter();
is_entered_ = true;
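
The enter/push/infer protocol is easiest to see in isolation. Below is a rough model using standard-library types; the real class uses zone-allocated lists and Handle<String>, and attaches the inferred name to the collected FunctionLiterals instead of returning it:

#include <cassert>
#include <string>
#include <vector>

// Simplified FuncNameInferrer: names collected between Enter() and
// InferAndLeave() are joined with '.' to name an anonymous function.
class NameInferrer {
 public:
  bool IsOpen() const { return !entries_.empty(); }
  void Enter() { entries_.push_back(names_.size()); }
  void PushName(const std::string& name) {
    if (IsOpen()) names_.push_back(name);
  }
  std::string InferAndLeave() {
    assert(IsOpen());
    std::string result;
    for (size_t i = 0; i < names_.size(); i++) {
      if (i > 0) result += ".";
      result += names_[i];
    }
    names_.resize(entries_.back());  // drop names from this state
    entries_.pop_back();
    return result;
  }

 private:
  std::vector<size_t> entries_;     // names_ length at each Enter()
  std::vector<std::string> names_;  // names collected from LHS visits
};

int main() {
  // Models inference for: obj.handler = function() { ... };
  NameInferrer inferrer;
  inferrer.Enter();            // entered when the assignment is visited
  inferrer.PushName("obj");    // collected while visiting the LHS
  inferrer.PushName("handler");
  assert(inferrer.InferAndLeave() == "obj.handler");
  return 0;
}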
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index 46b7db322..ed4e262e6 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -30,7 +30,8 @@
#include "api.h"
#include "global-handles.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
class GlobalHandles::Node : public Malloced {
public:
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index e6e9de1d1..9e63ba7a9 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -30,7 +30,8 @@
#include "list-inl.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Structure for tracking global handles.
// A single list keeps all the allocated global handles.
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index a0b5ac363..2b0fe15d1 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,7 +28,8 @@
#ifndef V8_GLOBALS_H_
#define V8_GLOBALS_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Processor architecture detection. For more info on what's defined, see:
// http://msdn.microsoft.com/en-us/library/b0084kay.aspx
@@ -77,16 +78,23 @@ typedef byte* Address;
#define V8_UINT64_C(x) (x ## UI64)
#define V8_INT64_C(x) (x ## I64)
#define V8_PTR_PREFIX "ll"
-#else
+#else // _MSC_VER
#define V8_UINT64_C(x) (x ## UL)
#define V8_INT64_C(x) (x ## L)
#define V8_PTR_PREFIX "l"
-#endif
+#endif // _MSC_VER
#else // V8_HOST_ARCH_64_BIT
#define V8_PTR_PREFIX ""
-#endif
+#endif // V8_HOST_ARCH_64_BIT
-#define V8PRIp V8_PTR_PREFIX "x"
+#define V8PRIxPTR V8_PTR_PREFIX "x"
+#define V8PRIdPTR V8_PTR_PREFIX "d"
+
+// Fix for Mac OS X defining uintptr_t as "unsigned long":
+#if defined(__APPLE__) && defined(__MACH__)
+#undef V8PRIxPTR
+#define V8PRIxPTR "lx"
+#endif
// Code-point values in Unicode 4.0 are 21 bits wide.
typedef uint16_t uc16;
@@ -103,11 +111,12 @@ const int kMinInt = -kMaxInt - 1;
const uint32_t kMaxUInt32 = 0xFFFFFFFFu;
-const int kCharSize = sizeof(char); // NOLINT
-const int kShortSize = sizeof(short); // NOLINT
-const int kIntSize = sizeof(int); // NOLINT
-const int kDoubleSize = sizeof(double); // NOLINT
-const int kPointerSize = sizeof(void*); // NOLINT
+const int kCharSize = sizeof(char); // NOLINT
+const int kShortSize = sizeof(short); // NOLINT
+const int kIntSize = sizeof(int); // NOLINT
+const int kDoubleSize = sizeof(double); // NOLINT
+const int kPointerSize = sizeof(void*); // NOLINT
+const int kIntptrSize = sizeof(intptr_t); // NOLINT
#if V8_HOST_ARCH_64_BIT
const int kPointerSizeLog2 = 3;
@@ -116,9 +125,12 @@ const int kPointerSizeLog2 = 2;
#endif
const int kObjectAlignmentBits = kPointerSizeLog2;
-const intptr_t kObjectAlignmentMask = (1 << kObjectAlignmentBits) - 1;
const intptr_t kObjectAlignment = 1 << kObjectAlignmentBits;
+const intptr_t kObjectAlignmentMask = kObjectAlignment - 1;
+// Desired alignment for pointers.
+const intptr_t kPointerAlignment = (1 << kPointerSizeLog2);
+const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
// Tag information for HeapObject.
const int kHeapObjectTag = 1;
@@ -232,6 +244,7 @@ class ObjectGroup;
class TickSample;
class VirtualMemory;
class Mutex;
+class ZoneScopeInfo;
typedef bool (*WeakSlotCallback)(Object** pointer);
@@ -314,8 +327,6 @@ typedef void (*InlineCacheCallback)(Code* code, Address ic);
enum InlineCacheState {
// Has never been executed.
UNINITIALIZED,
- // Has never been executed, but is in a loop.
- UNINITIALIZED_IN_LOOP,
// Has been executed but monomorphic state has been delayed.
PREMONOMORPHIC,
// Has been executed and only one receiver type has been seen.
@@ -330,6 +341,12 @@ enum InlineCacheState {
};
+enum InLoopFlag {
+ NOT_IN_LOOP,
+ IN_LOOP
+};
+
+
// Type of properties.
// Order of properties is significant.
// Must fit in the BitField PropertyDetails::TypeField.
@@ -411,7 +428,11 @@ enum StateTag {
// OBJECT_SIZE_ALIGN returns the value aligned to the HeapObject alignment.
#define OBJECT_SIZE_ALIGN(value) \
- ((value + kObjectAlignmentMask) & ~kObjectAlignmentMask)
+ (((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask)
+
+// POINTER_SIZE_ALIGN returns the value aligned as a pointer.
+#define POINTER_SIZE_ALIGN(value) \
+ (((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)
// The expression OFFSET_OF(type, field) computes the byte-offset
// of the specified field relative to the containing type. This
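
The added parentheses around value are a real fix: with an expression argument such as x << 2, the old expansion would have parsed as x << (2 + kObjectAlignmentMask), since + binds tighter than <<. The rounding arithmetic itself, sketched as a plain function:

#include <cassert>
#include <cstdint>

// Round value up to a multiple of alignment (a power of two), as
// OBJECT_SIZE_ALIGN and POINTER_SIZE_ALIGN do with their masks.
inline intptr_t AlignUp(intptr_t value, intptr_t alignment) {
  intptr_t mask = alignment - 1;  // e.g. kObjectAlignmentMask
  return (value + mask) & ~mask;
}

int main() {
  assert(AlignUp(13, 4) == 16);
  assert(AlignUp(16, 4) == 16);  // aligned values are unchanged
  assert(AlignUp(1, 8) == 8);
  return 0;
}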
diff --git a/deps/v8/src/handles-inl.h b/deps/v8/src/handles-inl.h
index e5899e3ac..6013c5b51 100644
--- a/deps/v8/src/handles-inl.h
+++ b/deps/v8/src/handles-inl.h
@@ -33,7 +33,8 @@
#include "handles.h"
#include "api.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
template<class T>
Handle<T>::Handle(T* obj) {
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index 773483d60..44ca60221 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -37,7 +37,8 @@
#include "natives.h"
#include "runtime.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
v8::ImplementationUtilities::HandleScopeData HandleScope::current_ =
@@ -221,6 +222,12 @@ Handle<Object> ForceSetProperty(Handle<JSObject> object,
}
+Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
+ Handle<Object> key) {
+ CALL_HEAP_FUNCTION(Runtime::ForceDeleteObjectProperty(object, key), Object);
+}
+
+
Handle<Object> IgnoreAttributesAndSetLocalProperty(
Handle<JSObject> object,
Handle<String> key,
@@ -230,6 +237,7 @@ Handle<Object> IgnoreAttributesAndSetLocalProperty(
IgnoreAttributesAndSetLocalProperty(*key, *value, attributes), Object);
}
+
Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
Handle<String> key,
Handle<Object> value,
@@ -273,19 +281,49 @@ Handle<Object> GetPrototype(Handle<Object> obj) {
Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
bool create_if_needed) {
- CALL_HEAP_FUNCTION(obj->GetHiddenProperties(create_if_needed), Object);
+ Handle<String> key = Factory::hidden_symbol();
+
+ if (obj->HasFastProperties()) {
+ // If the object has fast properties, check whether the first slot
+ // in the descriptor array matches the hidden symbol. Since the
+ // hidden symbol's hash code is zero (and no other string has hash
+ // code zero) it will always occupy the first entry if present.
+ DescriptorArray* descriptors = obj->map()->instance_descriptors();
+ DescriptorReader r(descriptors, 0); // Explicitly position reader at zero.
+ if (!r.eos() && (r.GetKey() == *key) && r.IsProperty()) {
+ ASSERT(r.type() == FIELD);
+ return Handle<Object>(obj->FastPropertyAt(r.GetFieldIndex()));
+ }
+ }
+
+ // Only attempt to find the hidden properties in the local object and not
+ // in the prototype chain. Note that HasLocalProperty() can cause a GC in
+ // the general case in the presence of interceptors.
+ if (!obj->HasLocalProperty(*key)) {
+ // Hidden properties object not found. Allocate a new hidden properties
+ // object if requested. Otherwise return the undefined value.
+ if (create_if_needed) {
+ Handle<Object> hidden_obj = Factory::NewJSObject(Top::object_function());
+ return SetProperty(obj, key, hidden_obj, DONT_ENUM);
+ } else {
+ return Factory::undefined_value();
+ }
+ }
+ return GetProperty(obj, key);
}
Handle<Object> DeleteElement(Handle<JSObject> obj,
uint32_t index) {
- CALL_HEAP_FUNCTION(obj->DeleteElement(index), Object);
+ CALL_HEAP_FUNCTION(obj->DeleteElement(index, JSObject::NORMAL_DELETION),
+ Object);
}
Handle<Object> DeleteProperty(Handle<JSObject> obj,
Handle<String> prop) {
- CALL_HEAP_FUNCTION(obj->DeleteProperty(*prop), Object);
+ CALL_HEAP_FUNCTION(obj->DeleteProperty(*prop, JSObject::NORMAL_DELETION),
+ Object);
}
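
The fast-properties shortcut in GetHiddenProperties leans on two invariants: descriptor arrays are sorted by key hash, and only the hidden symbol hashes to zero. Together they mean the hidden property, if present, must sit in slot 0. A toy illustration with a plain sorted vector standing in for the DescriptorArray:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

struct Descriptor {
  uint32_t hash;    // hash of the property key
  std::string key;
};

int main() {
  // Keep descriptors sorted by hash; only "<hidden>" may hash to 0.
  std::vector<Descriptor> descriptors = {
      {0x9e3779b9u, "x"}, {0u, "<hidden>"}, {0x7f4a7c15u, "y"}};
  std::sort(descriptors.begin(), descriptors.end(),
            [](const Descriptor& a, const Descriptor& b) {
              return a.hash < b.hash;
            });
  // So the fast path only ever needs to inspect the first slot.
  assert(descriptors[0].key == "<hidden>");
  return 0;
}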
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index 652d6c70e..af638b804 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -30,7 +30,8 @@
#include "apiutils.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// ----------------------------------------------------------------------------
// A Handle provides a reference to an object that survives relocation by
@@ -118,15 +119,15 @@ class HandleScope {
static int NumberOfHandles();
// Creates a new handle with the given value.
- static inline void** CreateHandle(void* value) {
+ static inline Object** CreateHandle(Object* value) {
void** result = current_.next;
if (result == current_.limit) result = Extend();
// Update the current next field, set the value in the created
// handle, and return the result.
ASSERT(result < current_.limit);
current_.next = result + 1;
- *result = value;
- return result;
+ *reinterpret_cast<Object**>(result) = value;
+ return reinterpret_cast<Object**>(result);
}
private:
@@ -201,6 +202,9 @@ Handle<Object> ForceSetProperty(Handle<JSObject> object,
Handle<Object> value,
PropertyAttributes attributes);
+Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
+ Handle<Object> key);
+
Handle<Object> IgnoreAttributesAndSetLocalProperty(Handle<JSObject> object,
Handle<String> key,
Handle<Object> value,
@@ -228,6 +232,9 @@ Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
Handle<Object> GetPrototype(Handle<Object> obj);
+// Return the object's hidden properties object. If the object has no hidden
+// properties and create_if_needed is true, then a new hidden properties
+// object will be allocated. Otherwise Heap::undefined_value is returned.
Handle<Object> GetHiddenProperties(Handle<JSObject> obj, bool create_if_needed);
Handle<Object> DeleteElement(Handle<JSObject> obj, uint32_t index);
diff --git a/deps/v8/src/hashmap.cc b/deps/v8/src/hashmap.cc
index 126f73924..b7173127e 100644
--- a/deps/v8/src/hashmap.cc
+++ b/deps/v8/src/hashmap.cc
@@ -29,14 +29,8 @@
#include "hashmap.h"
-namespace v8 { namespace internal {
-
-
-static inline bool IsPowerOf2(uint32_t x) {
- ASSERT(x != 0);
- return (x & (x - 1)) == 0;
-}
-
+namespace v8 {
+namespace internal {
Allocator HashMap::DefaultAllocator;
@@ -66,7 +60,7 @@ HashMap::~HashMap() {
HashMap::Entry* HashMap::Lookup(void* key, uint32_t hash, bool insert) {
// Find a matching entry.
Entry* p = Probe(key, hash);
- if (p->key != NULL) {
+ if (p->key != NULL) {
return p;
}
@@ -91,6 +85,65 @@ HashMap::Entry* HashMap::Lookup(void* key, uint32_t hash, bool insert) {
}
+void HashMap::Remove(void* key, uint32_t hash) {
+ // Lookup the entry for the key to remove.
+ Entry* p = Probe(key, hash);
+ if (p->key == NULL) {
+ // Key not found; nothing to remove.
+ return;
+ }
+
+ // To remove an entry we need to ensure that it does not create an empty
+ // entry that will cause the search for another entry to stop too soon. If all
+ // the entries between the entry to remove and the next empty slot have their
+ // initial position inside this interval, clearing the entry to remove will
+ // not break the search. If, while searching for the next empty entry, an
+ // entry is encountered which does not have its initial position between the
+ // entry to remove and the position looked at, then this entry can be moved to
+ // the place of the entry to remove without breaking the search for it. The
+ // entry made vacant by this move is now the entry to remove and the process
+ // starts over.
+ // Algorithm from http://en.wikipedia.org/wiki/Open_addressing.
+
+ // This guarantees loop termination: there is at least one empty entry, so
+ // eventually the removed entry will have an empty entry after it.
+ ASSERT(occupancy_ < capacity_);
+
+ // p is the candidate entry to clear. q is used to scan forwards.
+ Entry* q = p; // Start at the entry to remove.
+ while (true) {
+ // Move q to the next entry.
+ q = q + 1;
+ if (q == map_end()) {
+ q = map_;
+ }
+
+ // All entries between p and q have their initial position between p and q
+ // and the entry p can be cleared without breaking the search for these
+ // entries.
+ if (q->key == NULL) {
+ break;
+ }
+
+ // Find the initial position for the entry at position q.
+ Entry* r = map_ + (q->hash & (capacity_ - 1));
+
+ // If the entry at position q has its initial position outside the range
+ // between p and q it can be moved forward to position p and will still be
+ // found. There is now a new candidate entry for clearing.
+ if ((q > p && (r <= p || r > q)) ||
+ (q < p && (r <= p && r > q))) {
+ *p = *q;
+ p = q;
+ }
+ }
+
+ // Clear the entry, which is now allowed to be emptied.
+ p->key = NULL;
+ occupancy_--;
+}
+
+
void HashMap::Clear() {
// Mark all entries as empty.
const Entry* end = map_end();
@@ -126,7 +179,7 @@ HashMap::Entry* HashMap::Probe(void* key, uint32_t hash) {
const Entry* end = map_end();
ASSERT(map_ <= p && p < end);
- ASSERT(occupancy_ < capacity_); // guarantees loop termination
+ ASSERT(occupancy_ < capacity_); // Guarantees loop termination.
while (p->key != NULL && (hash != p->hash || !match_(key, p->key))) {
p++;
if (p >= end) {
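
The new Remove is standard open-addressing deletion with entry back-shifting. A compact stand-alone version over a power-of-two table, assuming integer keys that serve as their own hash and zero as the empty-slot marker:

#include <cassert>
#include <cstdint>
#include <vector>

// Open-addressing removal with back-shifting, as in HashMap::Remove.
void Remove(std::vector<uint32_t>& table, uint32_t key) {
  const uint32_t mask = table.size() - 1;
  size_t p = key & mask;           // probe for the key
  while (table[p] != key) {
    if (table[p] == 0) return;     // key not found
    p = (p + 1) & mask;
  }
  size_t q = p;
  while (true) {
    q = (q + 1) & mask;            // scan toward the next empty slot
    if (table[q] == 0) break;
    size_t r = table[q] & mask;    // initial position of the entry at q
    // If q's initial position lies outside (p, q], move it back to p;
    // q becomes the new candidate slot to clear.
    if ((q > p && (r <= p || r > q)) || (q < p && (r <= p && r > q))) {
      table[p] = table[q];
      p = q;
    }
  }
  table[p] = 0;                    // safe to empty this slot now
}

int main() {
  std::vector<uint32_t> table(8, 0);
  table[0] = 8;    // 8 hashes to slot 0
  table[1] = 16;   // 16 also hashes to slot 0, displaced to slot 1
  Remove(table, 8);
  // 16 was shifted back into slot 0, so probing still finds it.
  assert(table[0] == 16 && table[1] == 0);
  return 0;
}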
diff --git a/deps/v8/src/hashmap.h b/deps/v8/src/hashmap.h
index fabf3dc3e..b92c71573 100644
--- a/deps/v8/src/hashmap.h
+++ b/deps/v8/src/hashmap.h
@@ -28,7 +28,8 @@
#ifndef V8_HASHMAP_H_
#define V8_HASHMAP_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Allocator defines the memory allocator interface
@@ -75,6 +76,9 @@ class HashMap {
// Otherwise, NULL is returned.
Entry* Lookup(void* key, uint32_t hash, bool insert);
+ // Removes the entry with matching key.
+ void Remove(void* key, uint32_t hash);
+
// Empties the hash map (occupancy() == 0).
void Clear();
diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h
index 86165ee1b..8dd09d77d 100644
--- a/deps/v8/src/heap-inl.h
+++ b/deps/v8/src/heap-inl.h
@@ -31,7 +31,8 @@
#include "log.h"
#include "v8-counters.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
int Heap::MaxHeapObjectSize() {
return Page::kMaxHeapObjectSize;
@@ -145,7 +146,9 @@ void Heap::RecordWrite(Address address, int offset) {
if (new_space_.Contains(address)) return;
ASSERT(!new_space_.FromSpaceContains(address));
SLOW_ASSERT(Contains(address + offset));
+#ifndef V8_HOST_ARCH_64_BIT
Page::SetRSet(address, offset);
+#endif // V8_HOST_ARCH_64_BIT
}
@@ -191,6 +194,27 @@ void Heap::CopyBlock(Object** dst, Object** src, int byte_size) {
}
+void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
+ ASSERT(InFromSpace(object));
+
+ // We use the first word (where the map pointer usually is) of a heap
+ // object to record the forwarding pointer. A forwarding pointer can
+ // point to an old space, the code space, or the to space of the new
+ // generation.
+ MapWord first_word = object->map_word();
+
+ // If the first word is a forwarding address, the object has already been
+ // copied.
+ if (first_word.IsForwardingAddress()) {
+ *p = first_word.ToForwardingAddress();
+ return;
+ }
+
+ // Call the slow part of scavenge object.
+ return ScavengeObjectSlow(p, object);
+}
+
+
Object* Heap::GetKeyedLookupCache() {
if (keyed_lookup_cache()->IsUndefined()) {
Object* obj = LookupCache::Allocate(4);
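
Moving ScavengeObject into heap-inl.h lets the common case, an already-forwarded object, be handled inline at every call site. A miniature model of the forwarding protocol (the low-bit tag below is illustrative only; V8's MapWord uses its own encoding):

#include <cassert>
#include <cstdint>

// Toy heap object: the first word is either a map pointer or, once the
// object has been copied, a forwarding address tagged in the low bit.
struct Object {
  uintptr_t first_word;
  bool IsForwarded() const { return (first_word & 1) != 0; }
  Object* Forwarding() const {
    return reinterpret_cast<Object*>(first_word & ~uintptr_t(1));
  }
  void SetForwarding(Object* target) {
    first_word = reinterpret_cast<uintptr_t>(target) | 1;
  }
};

// ScavengeObject-style update of a slot pointing into from-space.
void Scavenge(Object** slot, Object* to_space_copy) {
  Object* obj = *slot;
  if (obj->IsForwarded()) {           // fast path: already copied
    *slot = obj->Forwarding();
    return;
  }
  obj->SetForwarding(to_space_copy);  // slow path: record the copy
  *slot = to_space_copy;
}

int main() {
  Object from = {0}, to = {0};
  Object* slot_a = &from;
  Object* slot_b = &from;
  Scavenge(&slot_a, &to);  // first visit installs the forwarding pointer
  Scavenge(&slot_b, &to);  // second visit just follows it
  assert(slot_a == &to && slot_b == &to);
  return 0;
}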
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index 6d600152e..772cf329c 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -40,7 +40,8 @@
#include "scopeinfo.h"
#include "v8threads.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
#define ROOT_ALLOCATION(type, name) type* Heap::name##_;
ROOT_LIST(ROOT_ALLOCATION)
@@ -283,6 +284,9 @@ void Heap::GarbageCollectionEpilogue() {
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
ReportStatisticsAfterGC();
#endif
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ Debug::AfterGarbageCollection();
+#endif
}
@@ -537,8 +541,39 @@ class ScavengeVisitor: public ObjectVisitor {
};
+// A queue of pointers and maps of to-be-promoted objects during a
+// scavenge collection.
+class PromotionQueue {
+ public:
+ void Initialize(Address start_address) {
+ front_ = rear_ = reinterpret_cast<HeapObject**>(start_address);
+ }
+
+ bool is_empty() { return front_ <= rear_; }
+
+ void insert(HeapObject* object, Map* map) {
+ *(--rear_) = object;
+ *(--rear_) = map;
+ // Assert no overflow into live objects.
+ ASSERT(reinterpret_cast<Address>(rear_) >= Heap::new_space()->top());
+ }
+
+ void remove(HeapObject** object, Map** map) {
+ *object = *(--front_);
+ *map = Map::cast(*(--front_));
+ // Assert no underflow.
+ ASSERT(front_ >= rear_);
+ }
+
+ private:
+ // The front of the queue is higher in memory than the rear.
+ HeapObject** front_;
+ HeapObject** rear_;
+};
+
+
// Shared state read by the scavenge collector and set by ScavengeObject.
-static Address promoted_rear = NULL;
+static PromotionQueue promotion_queue;
#ifdef DEBUG
@@ -624,8 +659,7 @@ void Heap::Scavenge() {
// frees up its size in bytes from the top of the new space, and
// objects are at least one pointer in size.
Address new_space_front = new_space_.ToSpaceLow();
- Address promoted_front = new_space_.ToSpaceHigh();
- promoted_rear = new_space_.ToSpaceHigh();
+ promotion_queue.Initialize(new_space_.ToSpaceHigh());
ScavengeVisitor scavenge_visitor;
// Copy roots.
@@ -634,15 +668,36 @@ void Heap::Scavenge() {
// Copy objects reachable from weak pointers.
GlobalHandles::IterateWeakRoots(&scavenge_visitor);
+#if V8_HOST_ARCH_64_BIT
+ // TODO(X64): Make this go away again. We currently disable RSets for
+ // 64-bit mode.
+ HeapObjectIterator old_pointer_iterator(old_pointer_space_);
+ while (old_pointer_iterator.has_next()) {
+ HeapObject* heap_object = old_pointer_iterator.next();
+ heap_object->Iterate(&scavenge_visitor);
+ }
+ HeapObjectIterator map_iterator(map_space_);
+ while (map_iterator.has_next()) {
+ HeapObject* heap_object = map_iterator.next();
+ heap_object->Iterate(&scavenge_visitor);
+ }
+ LargeObjectIterator lo_iterator(lo_space_);
+ while (lo_iterator.has_next()) {
+ HeapObject* heap_object = lo_iterator.next();
+ if (heap_object->IsFixedArray()) {
+ heap_object->Iterate(&scavenge_visitor);
+ }
+ }
+#else // V8_HOST_ARCH_64_BIT
// Copy objects reachable from the old generation. By definition,
// there are no intergenerational pointers in code or data spaces.
IterateRSet(old_pointer_space_, &ScavengePointer);
IterateRSet(map_space_, &ScavengePointer);
lo_space_->IterateRSet(&ScavengePointer);
+#endif // V8_HOST_ARCH_64_BIT
do {
ASSERT(new_space_front <= new_space_.top());
- ASSERT(promoted_front >= promoted_rear);
// The addresses new_space_front and new_space_.top() define a
// queue of unprocessed copied objects. Process them until the
@@ -653,15 +708,26 @@ void Heap::Scavenge() {
new_space_front += object->Size();
}
- // The addresses promoted_front and promoted_rear define a queue
- // of unprocessed addresses of promoted objects. Process them
- // until the queue is empty.
- while (promoted_front > promoted_rear) {
- promoted_front -= kPointerSize;
- HeapObject* object =
- HeapObject::cast(Memory::Object_at(promoted_front));
- object->Iterate(&scavenge_visitor);
- UpdateRSet(object);
+ // Promote and process all the to-be-promoted objects.
+ while (!promotion_queue.is_empty()) {
+ HeapObject* source;
+ Map* map;
+ promotion_queue.remove(&source, &map);
+ // Copy the from-space object to its new location (given by the
+ // forwarding address) and fix its map.
+ HeapObject* target = source->map_word().ToForwardingAddress();
+ CopyBlock(reinterpret_cast<Object**>(target->address()),
+ reinterpret_cast<Object**>(source->address()),
+ source->SizeFromMap(map));
+ target->set_map(map);
+
+#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+ // Update NewSpace stats if necessary.
+ RecordCopiedObject(target);
+#endif
+ // Visit the newly copied object for pointers to new space.
+ target->Iterate(&scavenge_visitor);
+ UpdateRSet(target);
}
// Take another spin if there are now unswept objects in new space
@@ -735,6 +801,8 @@ class UpdateRSetVisitor: public ObjectVisitor {
int Heap::UpdateRSet(HeapObject* obj) {
+#ifndef V8_HOST_ARCH_64_BIT
+ // TODO(X64) Reenable RSet when we have a working 64-bit layout of Page.
ASSERT(!InNewSpace(obj));
// Special handling of fixed arrays to iterate the body based on the start
// address and offset. Just iterating the pointers as in UpdateRSetVisitor
@@ -756,6 +824,7 @@ int Heap::UpdateRSet(HeapObject* obj) {
UpdateRSetVisitor v;
obj->Iterate(&v);
}
+#endif // V8_HOST_ARCH_64_BIT
return obj->Size();
}
@@ -818,8 +887,8 @@ HeapObject* Heap::MigrateObject(HeapObject* source,
// Set the forwarding address.
source->set_map_word(MapWord::FromForwardingAddress(target));
- // Update NewSpace stats if necessary.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+ // Update NewSpace stats if necessary.
RecordCopiedObject(target);
#endif
@@ -827,28 +896,6 @@ HeapObject* Heap::MigrateObject(HeapObject* source,
}
-// Inlined function.
-void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
- ASSERT(InFromSpace(object));
-
- // We use the first word (where the map pointer usually is) of a heap
- // object to record the forwarding pointer. A forwarding pointer can
- // point to an old space, the code space, or the to space of the new
- // generation.
- MapWord first_word = object->map_word();
-
- // If the first word is a forwarding address, the object has already been
- // copied.
- if (first_word.IsForwardingAddress()) {
- *p = first_word.ToForwardingAddress();
- return;
- }
-
- // Call the slow part of scavenge object.
- return ScavengeObjectSlow(p, object);
-}
-
-
static inline bool IsShortcutCandidate(HeapObject* object, Map* map) {
STATIC_ASSERT(kNotStringTag != 0 && kSymbolTag != 0);
ASSERT(object->map() == map);
@@ -879,6 +926,11 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
}
int object_size = object->SizeFromMap(first_word.ToMap());
+ // We rely on live objects in new space to be at least two pointers,
+ // so we can store the from-space address and map pointer of promoted
+ // objects in the to space.
+ ASSERT(object_size >= 2 * kPointerSize);
+
// If the object should be promoted, we try to copy it to old space.
if (ShouldBePromoted(object->address(), object_size)) {
OldSpace* target_space = Heap::TargetSpace(object);
@@ -886,16 +938,29 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
target_space == Heap::old_data_space_);
Object* result = target_space->AllocateRaw(object_size);
if (!result->IsFailure()) {
- *p = MigrateObject(object, HeapObject::cast(result), object_size);
+ HeapObject* target = HeapObject::cast(result);
if (target_space == Heap::old_pointer_space_) {
- // Record the object's address at the top of the to space, to allow
- // it to be swept by the scavenger.
- promoted_rear -= kPointerSize;
- Memory::Object_at(promoted_rear) = *p;
+ // Save the from-space object pointer and its map pointer at the
+ // top of the to space to be swept and copied later. Write the
+ // forwarding address over the map word of the from-space
+ // object.
+ promotion_queue.insert(object, first_word.ToMap());
+ object->set_map_word(MapWord::FromForwardingAddress(target));
+
+ // Give the space allocated for the result a proper map by
+ // treating it as a free list node (not linked into the free
+ // list).
+ FreeListNode* node = FreeListNode::FromAddress(target->address());
+ node->set_size(object_size);
+
+ *p = target;
} else {
+ // Objects promoted to the data space can be copied immediately
+ // and not revisited---we will never sweep that space for
+ // pointers and the copied objects do not contain pointers to
+ // new space objects.
+ *p = MigrateObject(object, target, object_size);
#ifdef DEBUG
- // Objects promoted to the data space should not have pointers to
- // new space.
VerifyNonPointerSpacePointersVisitor v;
(*p)->Iterate(&v);
#endif
@@ -960,7 +1025,7 @@ bool Heap::CreateInitialMaps() {
meta_map_ = reinterpret_cast<Map*>(obj);
meta_map()->set_map(meta_map());
- obj = AllocatePartialMap(FIXED_ARRAY_TYPE, Array::kHeaderSize);
+ obj = AllocatePartialMap(FIXED_ARRAY_TYPE, FixedArray::kHeaderSize);
if (obj->IsFailure()) return false;
fixed_array_map_ = Map::cast(obj);
@@ -1017,37 +1082,37 @@ bool Heap::CreateInitialMaps() {
STRING_TYPE_LIST(ALLOCATE_STRING_MAP);
#undef ALLOCATE_STRING_MAP
- obj = AllocateMap(SHORT_STRING_TYPE, SeqTwoByteString::kHeaderSize);
+ obj = AllocateMap(SHORT_STRING_TYPE, SeqTwoByteString::kAlignedSize);
if (obj->IsFailure()) return false;
undetectable_short_string_map_ = Map::cast(obj);
undetectable_short_string_map_->set_is_undetectable();
- obj = AllocateMap(MEDIUM_STRING_TYPE, SeqTwoByteString::kHeaderSize);
+ obj = AllocateMap(MEDIUM_STRING_TYPE, SeqTwoByteString::kAlignedSize);
if (obj->IsFailure()) return false;
undetectable_medium_string_map_ = Map::cast(obj);
undetectable_medium_string_map_->set_is_undetectable();
- obj = AllocateMap(LONG_STRING_TYPE, SeqTwoByteString::kHeaderSize);
+ obj = AllocateMap(LONG_STRING_TYPE, SeqTwoByteString::kAlignedSize);
if (obj->IsFailure()) return false;
undetectable_long_string_map_ = Map::cast(obj);
undetectable_long_string_map_->set_is_undetectable();
- obj = AllocateMap(SHORT_ASCII_STRING_TYPE, SeqAsciiString::kHeaderSize);
+ obj = AllocateMap(SHORT_ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize);
if (obj->IsFailure()) return false;
undetectable_short_ascii_string_map_ = Map::cast(obj);
undetectable_short_ascii_string_map_->set_is_undetectable();
- obj = AllocateMap(MEDIUM_ASCII_STRING_TYPE, SeqAsciiString::kHeaderSize);
+ obj = AllocateMap(MEDIUM_ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize);
if (obj->IsFailure()) return false;
undetectable_medium_ascii_string_map_ = Map::cast(obj);
undetectable_medium_ascii_string_map_->set_is_undetectable();
- obj = AllocateMap(LONG_ASCII_STRING_TYPE, SeqAsciiString::kHeaderSize);
+ obj = AllocateMap(LONG_ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize);
if (obj->IsFailure()) return false;
undetectable_long_ascii_string_map_ = Map::cast(obj);
undetectable_long_ascii_string_map_->set_is_undetectable();
- obj = AllocateMap(BYTE_ARRAY_TYPE, Array::kHeaderSize);
+ obj = AllocateMap(BYTE_ARRAY_TYPE, Array::kAlignedSize);
if (obj->IsFailure()) return false;
byte_array_map_ = Map::cast(obj);
@@ -1663,7 +1728,7 @@ void Heap::CreateFillerObjectAt(Address addr, int size) {
Object* Heap::CreateCode(const CodeDesc& desc,
- ScopeInfo<>* sinfo,
+ ZoneScopeInfo* sinfo,
Code::Flags flags,
Handle<Object> self_reference) {
// Compute size
@@ -2599,12 +2664,13 @@ void Heap::ZapFromSpace() {
#endif // DEBUG
-void Heap::IterateRSetRange(Address object_start,
- Address object_end,
- Address rset_start,
- ObjectSlotCallback copy_object_func) {
+int Heap::IterateRSetRange(Address object_start,
+ Address object_end,
+ Address rset_start,
+ ObjectSlotCallback copy_object_func) {
Address object_address = object_start;
Address rset_address = rset_start;
+ int set_bits_count = 0;
// Loop over all the pointers in [object_start, object_end).
while (object_address < object_end) {
@@ -2621,6 +2687,7 @@ void Heap::IterateRSetRange(Address object_start,
// If this pointer does not need to be remembered anymore, clear
// the remembered set bit.
if (!Heap::InNewSpace(*object_p)) result_rset &= ~bitmask;
+ set_bits_count++;
}
object_address += kPointerSize;
}
@@ -2634,6 +2701,7 @@ void Heap::IterateRSetRange(Address object_start,
}
rset_address += kIntSize;
}
+ return set_bits_count;
}
@@ -2641,11 +2709,20 @@ void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) {
ASSERT(Page::is_rset_in_use());
ASSERT(space == old_pointer_space_ || space == map_space_);
+ static void* paged_rset_histogram = StatsTable::CreateHistogram(
+ "V8.RSetPaged",
+ 0,
+ Page::kObjectAreaSize / kPointerSize,
+ 30);
+
PageIterator it(space, PageIterator::PAGES_IN_USE);
while (it.has_next()) {
Page* page = it.next();
- IterateRSetRange(page->ObjectAreaStart(), page->AllocationTop(),
- page->RSetStart(), copy_object_func);
+ int count = IterateRSetRange(page->ObjectAreaStart(), page->AllocationTop(),
+ page->RSetStart(), copy_object_func);
+ if (paged_rset_histogram != NULL) {
+ StatsTable::AddHistogramSample(paged_rset_histogram, count);
+ }
}
}
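
The PromotionQueue introduced above replaces the bare promoted_rear pointer: (object, map) pairs are pushed downward from the high end of to-space while the copied-object front grows upward, and the ASSERT in insert checks the two never collide. A self-contained model of the queue discipline, with plain int pointers standing in for HeapObject and Map:

#include <cassert>
#include <vector>

// Downward-growing queue of (object, map) pairs, as in heap.cc above.
struct PromotionQueue {
  int** front_;
  int** rear_;
  void Initialize(int** high) { front_ = rear_ = high; }
  bool is_empty() const { return front_ <= rear_; }
  void insert(int* object, int* map) {
    *(--rear_) = object;    // rear_ moves toward low addresses
    *(--rear_) = map;
  }
  void remove(int** object, int** map) {
    *object = *(--front_);  // front_ chases rear_ downward
    *map = *(--front_);
  }
};

int main() {
  int obj_a = 0, map_a = 0, obj_b = 0, map_b = 0;
  std::vector<int*> buffer(8, nullptr);
  PromotionQueue queue;
  queue.Initialize(buffer.data() + buffer.size());  // the high end
  assert(queue.is_empty());
  queue.insert(&obj_a, &map_a);
  queue.insert(&obj_b, &map_b);
  int *object, *map;
  queue.remove(&object, &map);  // pairs come back in insertion order
  assert(object == &obj_a && map == &map_a);
  queue.remove(&object, &map);
  assert(object == &obj_b && map == &map_b);
  assert(queue.is_empty());
  return 0;
}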
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index ccc552f8d..d8080b6a8 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -28,7 +28,10 @@
#ifndef V8_HEAP_H_
#define V8_HEAP_H_
-namespace v8 { namespace internal {
+#include "zone-inl.h"
+
+namespace v8 {
+namespace internal {
// Defines all the roots in Heap.
#define STRONG_ROOT_LIST(V) \
@@ -570,7 +573,7 @@ class Heap : public AllStatic {
// object by containing this pointer.
// Please note this function does not perform a garbage collection.
static Object* CreateCode(const CodeDesc& desc,
- ScopeInfo<>* sinfo,
+ ZoneScopeInfo* sinfo,
Code::Flags flags,
Handle<Object> self_reference);
@@ -664,10 +667,11 @@ class Heap : public AllStatic {
// Iterates a range of remembered set addresses starting with rset_start
// corresponding to the range of allocated pointers
// [object_start, object_end).
- static void IterateRSetRange(Address object_start,
- Address object_end,
- Address rset_start,
- ObjectSlotCallback copy_object_func);
+ // Returns the number of bits that were set.
+ static int IterateRSetRange(Address object_start,
+ Address object_end,
+ Address rset_start,
+ ObjectSlotCallback copy_object_func);
// Returns whether the object resides in new space.
static inline bool InNewSpace(Object* object);
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index a3d24b276..045f17682 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -39,7 +39,8 @@
#include "cpu.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
Condition NegateCondition(Condition cc) {
return static_cast<Condition>(cc ^ 1);
@@ -158,7 +159,7 @@ Immediate::Immediate(const char* s) {
}
-Immediate::Immediate(Label *internal_offset) {
+Immediate::Immediate(Label* internal_offset) {
x_ = reinterpret_cast<int32_t>(internal_offset);
rmode_ = RelocInfo::INTERNAL_REFERENCE;
}
@@ -277,6 +278,22 @@ void Operand::set_modrm(int mod, Register rm) {
}
+void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
+ ASSERT(len_ == 1);
+ ASSERT((scale & -4) == 0);
+ // Use SIB with no index register only for base esp.
+ ASSERT(!index.is(esp) || base.is(esp));
+ buf_[1] = scale << 6 | index.code() << 3 | base.code();
+ len_ = 2;
+}
+
+
+void Operand::set_disp8(int8_t disp) {
+ ASSERT(len_ == 1 || len_ == 2);
+ *reinterpret_cast<int8_t*>(&buf_[len_++]) = disp;
+}
+
+
void Operand::set_dispr(int32_t disp, RelocInfo::Mode rmode) {
ASSERT(len_ == 1 || len_ == 2);
int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
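
set_sib, newly inlined above, packs the IA-32 scale-index-base byte as scale << 6 | index << 3 | base. A tiny sketch of that encoding using the usual IA-32 register numbering (eax=0 through edi=7):

#include <cassert>
#include <cstdint>

// Build an IA-32 SIB byte: two bits of scale, three of index, three of base.
inline uint8_t MakeSib(int scale, int index, int base) {
  assert((scale & ~3) == 0);  // scale is log2 of the factor 1/2/4/8
  assert((index & ~7) == 0 && (base & ~7) == 0);
  return static_cast<uint8_t>(scale << 6 | index << 3 | base);
}

int main() {
  // Operand [ebp + ecx*4]: scale log2(4)=2, index ecx=1, base ebp=5.
  assert(MakeSib(2, 1, 5) == 0x8D);
  // Index code 4 (esp) means "no index", hence the assembler's
  // ASSERT(!index.is(esp) || base.is(esp)) guard above.
  return 0;
}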
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 3a2d3f82d..434bf070f 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -40,30 +40,8 @@
#include "macro-assembler.h"
#include "serialize.h"
-namespace v8 { namespace internal {
-
-// -----------------------------------------------------------------------------
-// Implementation of Register
-
-Register eax = { 0 };
-Register ecx = { 1 };
-Register edx = { 2 };
-Register ebx = { 3 };
-Register esp = { 4 };
-Register ebp = { 5 };
-Register esi = { 6 };
-Register edi = { 7 };
-Register no_reg = { -1 };
-
-XMMRegister xmm0 = { 0 };
-XMMRegister xmm1 = { 1 };
-XMMRegister xmm2 = { 2 };
-XMMRegister xmm3 = { 3 };
-XMMRegister xmm4 = { 4 };
-XMMRegister xmm5 = { 5 };
-XMMRegister xmm6 = { 6 };
-XMMRegister xmm7 = { 7 };
-
+namespace v8 {
+namespace internal {
// -----------------------------------------------------------------------------
// Implementation of CpuFeatures
@@ -256,20 +234,6 @@ Operand::Operand(Register index,
}
-void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
- ASSERT(len_ == 1);
- ASSERT((scale & -4) == 0);
- buf_[1] = scale << 6 | index.code() << 3 | base.code();
- len_ = 2;
-}
-
-
-void Operand::set_disp8(int8_t disp) {
- ASSERT(len_ == 1 || len_ == 2);
- *reinterpret_cast<int8_t*>(&buf_[len_++]) = disp;
-}
-
-
bool Operand::is_reg(Register reg) const {
return ((buf_[0] & 0xF8) == 0xC0) // addressing mode is register only.
&& ((buf_[0] & 0x07) == reg.code()); // register codes match.
@@ -288,7 +252,7 @@ static void InitCoverageLog();
#endif
// spare_buffer_
-static byte* spare_buffer_ = NULL;
+byte* Assembler::spare_buffer_ = NULL;
Assembler::Assembler(void* buffer, int buffer_size) {
if (buffer == NULL) {
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 4c995882e..79f239d7c 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -37,7 +37,8 @@
#ifndef V8_IA32_ASSEMBLER_IA32_H_
#define V8_IA32_ASSEMBLER_IA32_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// CPU Registers.
//
@@ -78,17 +79,15 @@ struct Register {
int code_;
};
-const int kNumRegisters = 8;
-
-extern Register eax;
-extern Register ecx;
-extern Register edx;
-extern Register ebx;
-extern Register esp;
-extern Register ebp;
-extern Register esi;
-extern Register edi;
-extern Register no_reg;
+const Register eax = { 0 };
+const Register ecx = { 1 };
+const Register edx = { 2 };
+const Register ebx = { 3 };
+const Register esp = { 4 };
+const Register ebp = { 5 };
+const Register esi = { 6 };
+const Register edi = { 7 };
+const Register no_reg = { -1 };
struct XMMRegister {
@@ -101,14 +100,14 @@ struct XMMRegister {
int code_;
};
-extern XMMRegister xmm0;
-extern XMMRegister xmm1;
-extern XMMRegister xmm2;
-extern XMMRegister xmm3;
-extern XMMRegister xmm4;
-extern XMMRegister xmm5;
-extern XMMRegister xmm6;
-extern XMMRegister xmm7;
+const XMMRegister xmm0 = { 0 };
+const XMMRegister xmm1 = { 1 };
+const XMMRegister xmm2 = { 2 };
+const XMMRegister xmm3 = { 3 };
+const XMMRegister xmm4 = { 4 };
+const XMMRegister xmm5 = { 5 };
+const XMMRegister xmm6 = { 6 };
+const XMMRegister xmm7 = { 7 };
enum Condition {
// any value < 0 is considered no_condition
@@ -815,6 +814,8 @@ class Assembler : public Malloced {
int buffer_size_;
// True if the assembler owns the buffer, false if buffer is external.
bool own_buffer_;
+ // A previously allocated buffer of kMinimalBufferSize bytes, or NULL.
+ static byte* spare_buffer_;
// code generation
byte* pc_; // the program counter; moves forward
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index 5c7ba8e1b..f65074bd4 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -29,7 +29,8 @@
#include "codegen-inl.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
#define __ ACCESS_MASM(masm)
@@ -311,7 +312,7 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// Set expected number of arguments to zero (not changing eax).
__ Set(ebx, Immediate(0));
- __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
+ __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
RelocInfo::CODE_TARGET);
}
diff --git a/deps/v8/src/ia32/codegen-ia32-inl.h b/deps/v8/src/ia32/codegen-ia32-inl.h
new file mode 100644
index 000000000..49c706d13
--- /dev/null
+++ b/deps/v8/src/ia32/codegen-ia32-inl.h
@@ -0,0 +1,46 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_IA32_CODEGEN_IA32_INL_H_
+#define V8_IA32_CODEGEN_IA32_INL_H_
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+// Platform-specific inline functions.
+
+void DeferredCode::Jump() { __ jmp(&entry_label_); }
+void DeferredCode::Branch(Condition cc) { __ j(cc, &entry_label_); }
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_IA32_CODEGEN_IA32_INL_H_
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index e260ab2d3..e9e40619d 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -36,11 +36,41 @@
#include "runtime.h"
#include "scopes.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
#define __ ACCESS_MASM(masm_)
// -------------------------------------------------------------------------
+// Platform-specific DeferredCode functions.
+
+void DeferredCode::SaveRegisters() {
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ int action = registers_[i];
+ if (action == kPush) {
+ __ push(RegisterAllocator::ToRegister(i));
+ } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
+ __ mov(Operand(ebp, action), RegisterAllocator::ToRegister(i));
+ }
+ }
+}
+
+
+void DeferredCode::RestoreRegisters() {
+ // Restore registers in reverse order due to the stack.
+ for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
+ int action = registers_[i];
+ if (action == kPush) {
+ __ pop(RegisterAllocator::ToRegister(i));
+ } else if (action != kIgnore) {
+ action &= ~kSyncedFlag;
+ __ mov(RegisterAllocator::ToRegister(i), Operand(ebp, action));
+ }
+ }
+}
+
+
+// -------------------------------------------------------------------------
// CodeGenState implementation.
CodeGenState::CodeGenState(CodeGenerator* owner)
@@ -72,7 +102,8 @@ CodeGenState::~CodeGenState() {
// -------------------------------------------------------------------------
// CodeGenerator implementation
-CodeGenerator::CodeGenerator(int buffer_size, Handle<Script> script,
+CodeGenerator::CodeGenerator(int buffer_size,
+ Handle<Script> script,
bool is_eval)
: is_eval_(is_eval),
script_(script),
@@ -107,13 +138,25 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
RegisterAllocator register_allocator(this);
allocator_ = &register_allocator;
ASSERT(frame_ == NULL);
- frame_ = new VirtualFrame(this);
+ frame_ = new VirtualFrame();
set_in_spilled_code(false);
// Adjust for function-level loop nesting.
loop_nesting_ += fun->loop_nesting();
- {
+ JumpTarget::set_compiling_deferred_code(false);
+
+#ifdef DEBUG
+ if (strlen(FLAG_stop_at) > 0 &&
+ fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+ frame_->SpillAll();
+ __ int3();
+ }
+#endif
+
+ // New scope to get automatic timing calculation.
+ { // NOLINT
+ HistogramTimerScope codegen_timer(&Counters::code_generation);
CodeGenState state(this);
// Entry:
@@ -125,19 +168,11 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
allocator_->Initialize();
frame_->Enter();
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- frame_->SpillAll();
- __ int3();
- }
-#endif
-
// Allocate space for locals and initialize them.
- frame_->AllocateStackSlots(scope_->num_stack_slots());
+ frame_->AllocateStackSlots();
// Initialize the function return target after the locals are set
// up, because it needs the expected frame height from the frame.
- function_return_.Initialize(this, JumpTarget::BIDIRECTIONAL);
+ function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
function_return_is_shadowed_ = false;
// Allocate the arguments object and copy the parameters into it.
@@ -278,7 +313,7 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
ASSERT(!function_return_is_shadowed_);
CodeForReturnPosition(fun);
frame_->PrepareForReturn();
- Result undefined(Factory::undefined_value(), this);
+ Result undefined(Factory::undefined_value());
if (function_return_.is_bound()) {
function_return_.Jump(&undefined);
} else {
@@ -293,7 +328,7 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
// control does not flow off the end of the body so we did not
// compile an artificial return statement just above, and (b) there
// are return statements in the body but (c) they are all shadowed.
- Result return_value(this);
+ Result return_value;
// Though this is a (possibly) backward block, the frames can
// only differ on their top element.
function_return_.Bind(&return_value, 1);
@@ -313,10 +348,11 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
DeleteFrame();
// Process any deferred code using the register allocator.
- if (HasStackOverflow()) {
- ClearDeferred();
- } else {
+ if (!HasStackOverflow()) {
+ HistogramTimerScope deferred_timer(&Counters::deferred_code_generation);
+ JumpTarget::set_compiling_deferred_code(true);
ProcessDeferred();
+ JumpTarget::set_compiling_deferred_code(false);
}
// There is no need to delete the register allocator, it is a
@@ -382,26 +418,25 @@ Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
JumpTarget* slow) {
ASSERT(slot->type() == Slot::CONTEXT);
ASSERT(tmp.is_register());
- Result context(esi, this);
+ Register context = esi;
for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
if (s->calls_eval()) {
// Check that extension is NULL.
- __ cmp(ContextOperand(context.reg(), Context::EXTENSION_INDEX),
+ __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
slow->Branch(not_equal, not_taken);
}
- __ mov(tmp.reg(), ContextOperand(context.reg(), Context::CLOSURE_INDEX));
+ __ mov(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
__ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
- context = tmp;
+ context = tmp.reg();
}
}
// Check that last extension is NULL.
- __ cmp(ContextOperand(context.reg(), Context::EXTENSION_INDEX),
- Immediate(0));
+ __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
slow->Branch(not_equal, not_taken);
- __ mov(tmp.reg(), ContextOperand(context.reg(), Context::FCONTEXT_INDEX));
+ __ mov(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
return ContextOperand(tmp.reg(), slot->index());
}
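ContextSlotOperandCheckExtensions walks the static scope chain and bails to the slow path whenever an eval-calling scope might have installed a context extension object. A hedged sketch of the same walk over a hypothetical context layout (the struct is illustrative, not V8's Context):

    // Illustrative context shape: outer links the closure chain, and
    // extension is non-null if eval introduced new bindings.
    struct Ctx {
      Ctx* outer;
      void* extension;
    };

    // Returns true if the slot can be read directly from its context;
    // false corresponds to branching to the slow runtime-lookup path.
    bool CanUseDirectSlotAccess(Ctx* ctx, int hops) {
      for (int i = 0; i < hops; i++) {
        if (ctx->extension != nullptr) return false;
        ctx = ctx->outer;
      }
      return ctx->extension == nullptr;  // the final extension check
    }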
@@ -461,14 +496,14 @@ void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
int original_height = frame_->height();
#endif
ASSERT(!in_spilled_code());
- JumpTarget true_target(this);
- JumpTarget false_target(this);
+ JumpTarget true_target;
+ JumpTarget false_target;
ControlDestination dest(&true_target, &false_target, true);
LoadCondition(x, typeof_state, &dest, false);
if (dest.false_was_fall_through()) {
// The false target was just bound.
- JumpTarget loaded(this);
+ JumpTarget loaded;
frame_->Push(Factory::false_value());
// There may be dangling jumps to the true target.
if (true_target.is_linked()) {
@@ -481,7 +516,7 @@ void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
} else if (dest.is_used()) {
// There is true, and possibly false, control flow (with true as
// the fall through).
- JumpTarget loaded(this);
+ JumpTarget loaded;
frame_->Push(Factory::true_value());
if (false_target.is_linked()) {
loaded.Jump();
@@ -497,7 +532,7 @@ void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
// short-circuited boolean operators).
ASSERT(has_valid_frame());
if (true_target.is_linked() || false_target.is_linked()) {
- JumpTarget loaded(this);
+ JumpTarget loaded;
loaded.Jump(); // Don't lose the current TOS.
if (true_target.is_linked()) {
true_target.Bind();
@@ -771,39 +806,35 @@ const char* GenericBinaryOpStub::GetName() {
}
-// A deferred code class implementing binary operations on likely smis.
-// This class generates both inline code and deferred code.
-// The fastest path is implemented inline. Deferred code calls
-// the GenericBinaryOpStub stub for slow cases.
+// Call the specialized stub for a binary operation.
class DeferredInlineBinaryOperation: public DeferredCode {
public:
- DeferredInlineBinaryOperation(CodeGenerator* generator,
- Token::Value op,
- OverwriteMode mode,
- GenericBinaryFlags flags)
- : DeferredCode(generator), stub_(op, mode, flags), op_(op) {
+ DeferredInlineBinaryOperation(Token::Value op,
+ Register dst,
+ Register left,
+ Register right,
+ OverwriteMode mode)
+ : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
set_comment("[ DeferredInlineBinaryOperation");
}
- // Consumes its arguments, left and right, leaving them invalid.
- Result GenerateInlineCode(Result* left, Result* right);
-
virtual void Generate();
private:
- GenericBinaryOpStub stub_;
Token::Value op_;
+ Register dst_;
+ Register left_;
+ Register right_;
+ OverwriteMode mode_;
};
void DeferredInlineBinaryOperation::Generate() {
- Result left(generator());
- Result right(generator());
- enter()->Bind(&left, &right);
- generator()->frame()->Push(&left);
- generator()->frame()->Push(&right);
- Result answer = generator()->frame()->CallStub(&stub_, 2);
- exit_.Jump(&answer);
+ __ push(left_);
+ __ push(right_);
+ GenericBinaryOpStub stub(op_, mode_, SMI_CODE_INLINED);
+ __ CallStub(&stub);
+ if (!dst_.is(eax)) __ mov(dst_, eax);
}
@@ -853,7 +884,7 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
if (left_is_string || right_is_string) {
frame_->Push(&left);
frame_->Push(&right);
- Result answer(this);
+ Result answer;
if (left_is_string) {
if (right_is_string) {
// TODO(lrn): if (left.is_constant() && right.is_constant())
@@ -999,31 +1030,342 @@ bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
}
+// Implements a binary operation using a deferred code object and some
+// inline code to operate on smis quickly.
void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
Result* left,
Result* right,
OverwriteMode overwrite_mode) {
- // Implements a binary operation using a deferred code object
- // and some inline code to operate on smis quickly.
+ // Special handling of div and mod because they use fixed registers.
+ if (op == Token::DIV || op == Token::MOD) {
+ // We need eax as the quotient register, edx as the remainder
+ // register, neither left nor right in eax or edx, and left copied
+ // to eax.
+ Result quotient;
+ Result remainder;
+ bool left_is_in_eax = false;
+ // Step 1: get eax for quotient.
+ if ((left->is_register() && left->reg().is(eax)) ||
+ (right->is_register() && right->reg().is(eax))) {
+ // One or both are in eax. Use a fresh non-edx register for
+ // them.
+ Result fresh = allocator_->Allocate();
+ ASSERT(fresh.is_valid());
+ if (fresh.reg().is(edx)) {
+ remainder = fresh;
+ fresh = allocator_->Allocate();
+ ASSERT(fresh.is_valid());
+ }
+ if (left->is_register() && left->reg().is(eax)) {
+ quotient = *left;
+ *left = fresh;
+ left_is_in_eax = true;
+ }
+ if (right->is_register() && right->reg().is(eax)) {
+ quotient = *right;
+ *right = fresh;
+ }
+ __ mov(fresh.reg(), eax);
+ } else {
+ // Neither left nor right is in eax.
+ quotient = allocator_->Allocate(eax);
+ }
+ ASSERT(quotient.is_register() && quotient.reg().is(eax));
+ ASSERT(!(left->is_register() && left->reg().is(eax)));
+ ASSERT(!(right->is_register() && right->reg().is(eax)));
+
+ // Step 2: get edx for remainder if necessary.
+ if (!remainder.is_valid()) {
+ if ((left->is_register() && left->reg().is(edx)) ||
+ (right->is_register() && right->reg().is(edx))) {
+ Result fresh = allocator_->Allocate();
+ ASSERT(fresh.is_valid());
+ if (left->is_register() && left->reg().is(edx)) {
+ remainder = *left;
+ *left = fresh;
+ }
+ if (right->is_register() && right->reg().is(edx)) {
+ remainder = *right;
+ *right = fresh;
+ }
+ __ mov(fresh.reg(), edx);
+ } else {
+ // Neither left nor right is in edx.
+ remainder = allocator_->Allocate(edx);
+ }
+ }
+ ASSERT(remainder.is_register() && remainder.reg().is(edx));
+ ASSERT(!(left->is_register() && left->reg().is(edx)));
+ ASSERT(!(right->is_register() && right->reg().is(edx)));
+
+ left->ToRegister();
+ right->ToRegister();
+ frame_->Spill(eax);
+ frame_->Spill(edx);
+
+ // Check that left and right are smi tagged.
+ DeferredInlineBinaryOperation* deferred =
+ new DeferredInlineBinaryOperation(op,
+ (op == Token::DIV) ? eax : edx,
+ left->reg(),
+ right->reg(),
+ overwrite_mode);
+ if (left->reg().is(right->reg())) {
+ __ test(left->reg(), Immediate(kSmiTagMask));
+ } else {
+ // Use the quotient register as a scratch for the tag check.
+ if (!left_is_in_eax) __ mov(eax, left->reg());
+ left_is_in_eax = false; // About to destroy the value in eax.
+ __ or_(eax, Operand(right->reg()));
+ ASSERT(kSmiTag == 0); // Adjust test if not the case.
+ __ test(eax, Immediate(kSmiTagMask));
+ }
+ deferred->Branch(not_zero);
+
+ if (!left_is_in_eax) __ mov(eax, left->reg());
+ // Sign extend eax into edx:eax.
+ __ cdq();
+ // Check for 0 divisor.
+ __ test(right->reg(), Operand(right->reg()));
+ deferred->Branch(zero);
+ // Divide edx:eax by the right operand.
+ __ idiv(right->reg());
+
+ // Complete the operation.
+ if (op == Token::DIV) {
+ // Check for a negative zero result. If the result is zero and the
+ // divisor is negative, return a floating point negative zero. The
+ // virtual frame is unchanged in this block, so local control flow
+ // can use a Label rather than a JumpTarget.
+ Label non_zero_result;
+ __ test(left->reg(), Operand(left->reg()));
+ __ j(not_zero, &non_zero_result);
+ __ test(right->reg(), Operand(right->reg()));
+ deferred->Branch(negative);
+ __ bind(&non_zero_result);
+ // Check for the corner case of dividing the most negative smi by
+ // -1. We cannot use the overflow flag, since it is not set by
+ // the idiv instruction.
+ ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ __ cmp(eax, 0x40000000);
+ deferred->Branch(equal);
+ // Check that the remainder is zero.
+ __ test(edx, Operand(edx));
+ deferred->Branch(not_zero);
+ // Tag the result and store it in the quotient register.
+ ASSERT(kSmiTagSize == times_2); // Adjust code if not the case.
+ __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
+ deferred->BindExit();
+ left->Unuse();
+ right->Unuse();
+ frame_->Push(&quotient);
+ } else {
+ ASSERT(op == Token::MOD);
+ // Check for a negative zero result. If the result is zero, and
+ // the dividend is negative, return a floating point negative
+ // zero. The frame is unchanged in this block, so local control
+ // flow can use a Label rather than a JumpTarget.
+ Label non_zero_result;
+ __ test(edx, Operand(edx));
+ __ j(not_zero, &non_zero_result, taken);
+ __ test(left->reg(), Operand(left->reg()));
+ deferred->Branch(negative);
+ __ bind(&non_zero_result);
+ deferred->BindExit();
+ left->Unuse();
+ right->Unuse();
+ frame_->Push(&remainder);
+ }
+ return;
+ }
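The inline division path above bails out to the deferred stub in exactly the cases a tagged 31-bit smi cannot represent. Note also that the tag check relies on kSmiTag == 0: or-ing the two tagged operands and testing the low bit verifies both tags at once. A compact restatement on untagged values (SmiDivideOk is an illustrative helper, assuming 31-bit smis):

    #include <cstdint>

    // True if a/b can be produced inline as a smi; each false branch
    // mirrors one of the deferred->Branch(...) bail-outs above.
    bool SmiDivideOk(int32_t a, int32_t b) {
      if (b == 0) return false;                       // zero divisor
      if (a == 0 && b < 0) return false;              // JS result is -0.0
      if (a == -0x40000000 && b == -1) return false;  // quotient 2^30 is untaggable
      if (a % b != 0) return false;                   // DIV must be exact
      return true;
    }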
+
+ // Special handling of shift operations because they use fixed
+ // registers.
+ if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
+ // Move left out of ecx if necessary.
+ if (left->is_register() && left->reg().is(ecx)) {
+ *left = allocator_->Allocate();
+ ASSERT(left->is_valid());
+ __ mov(left->reg(), ecx);
+ }
+ right->ToRegister(ecx);
+ left->ToRegister();
+ ASSERT(left->is_register() && !left->reg().is(ecx));
+ ASSERT(right->is_register() && right->reg().is(ecx));
+
+ // We will modify right, so it must be spilled.
+ frame_->Spill(ecx);
+
+ // Use a fresh answer register to avoid spilling the left operand.
+ Result answer = allocator_->Allocate();
+ ASSERT(answer.is_valid());
+ // Check that both operands are smis using the answer register as a
+ // temporary.
+ DeferredInlineBinaryOperation* deferred =
+ new DeferredInlineBinaryOperation(op,
+ answer.reg(),
+ left->reg(),
+ ecx,
+ overwrite_mode);
+ __ mov(answer.reg(), left->reg());
+ __ or_(answer.reg(), Operand(ecx));
+ __ test(answer.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+
+ // Untag both operands.
+ __ mov(answer.reg(), left->reg());
+ __ sar(answer.reg(), kSmiTagSize);
+ __ sar(ecx, kSmiTagSize);
+ // Perform the operation.
+ switch (op) {
+ case Token::SAR:
+ __ sar(answer.reg());
+ // No check of the result is necessary.
+ break;
+ case Token::SHR: {
+ Label result_ok;
+ __ shr(answer.reg());
+ // Check that the *unsigned* result fits in a smi. Neither of
+ // the two high-order bits can be set:
+ // * 0x80000000: the high bit would be lost when smi-tagging.
+ // * 0x40000000: the value would become negative when smi-tagged.
+ // These two cases can only happen with shifts by 0 or 1 when
+ // handed a valid smi. If the answer cannot be represented by a
+ // smi, restore the left and right arguments, and jump to the slow
+ // case. The low bit of the left argument may be lost, but only
+ // in a case where it is dropped anyway.
+ __ test(answer.reg(), Immediate(0xc0000000));
+ __ j(zero, &result_ok);
+ ASSERT(kSmiTag == 0);
+ __ shl(ecx, kSmiTagSize);
+ deferred->Jump();
+ __ bind(&result_ok);
+ break;
+ }
+ case Token::SHL: {
+ Label result_ok;
+ __ shl(answer.reg());
+ // Check that the *signed* result fits in a smi.
+ __ cmp(answer.reg(), 0xc0000000);
+ __ j(positive, &result_ok);
+ ASSERT(kSmiTag == 0);
+ __ shl(ecx, kSmiTagSize);
+ deferred->Jump();
+ __ bind(&result_ok);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ // Smi-tag the result in answer.
+ ASSERT(kSmiTagSize == 1); // Adjust code if not the case.
+ __ lea(answer.reg(),
+ Operand(answer.reg(), answer.reg(), times_1, kSmiTag));
+ deferred->BindExit();
+ left->Unuse();
+ right->Unuse();
+ frame_->Push(&answer);
+ return;
+ }
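The two range checks in the shift cases above can be stated directly on the untagged 32-bit result; both helpers below are illustrative and assume 31-bit smis.

    #include <cstdint>

    // SHR: the unsigned result re-tags only if neither of the two high
    // bits is set, which the test against 0xc0000000 verifies.
    bool UnsignedFitsSmi(uint32_t r) {
      return (r & 0xc0000000u) == 0;
    }

    // SHL: the signed result must lie in [-2^30, 2^30 - 1]. The
    // cmp/j(positive) pair computes the sign of r + 2^30 (mod 2^32),
    // which is clear exactly on that interval.
    bool SignedFitsSmi(int32_t r) {
      return r >= -0x40000000 && r <= 0x3fffffff;
    }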
+
+ // Handle the other binary operations.
+ left->ToRegister();
+ right->ToRegister();
+ // A newly allocated register, answer, holds the result. The registers
+ // containing left and right are not modified, so they do not need to
+ // be spilled in the fast case.
+ Result answer = allocator_->Allocate();
+ ASSERT(answer.is_valid());
+
+ // Perform the smi tag check.
DeferredInlineBinaryOperation* deferred =
- new DeferredInlineBinaryOperation(this, op, overwrite_mode,
- SMI_CODE_INLINED);
- // Generate the inline code that handles some smi operations,
- // and jumps to the deferred code for everything else.
- Result answer = deferred->GenerateInlineCode(left, right);
- deferred->BindExit(&answer);
+ new DeferredInlineBinaryOperation(op,
+ answer.reg(),
+ left->reg(),
+ right->reg(),
+ overwrite_mode);
+ if (left->reg().is(right->reg())) {
+ __ test(left->reg(), Immediate(kSmiTagMask));
+ } else {
+ __ mov(answer.reg(), left->reg());
+ __ or_(answer.reg(), Operand(right->reg()));
+ ASSERT(kSmiTag == 0); // Adjust test if not the case.
+ __ test(answer.reg(), Immediate(kSmiTagMask));
+ }
+ deferred->Branch(not_zero);
+ __ mov(answer.reg(), left->reg());
+ switch (op) {
+ case Token::ADD:
+ __ add(answer.reg(), Operand(right->reg())); // Add optimistically.
+ deferred->Branch(overflow);
+ break;
+
+ case Token::SUB:
+ __ sub(answer.reg(), Operand(right->reg())); // Subtract optimistically.
+ deferred->Branch(overflow);
+ break;
+
+ case Token::MUL: {
+ // If the smi tag is 0 we can just leave the tag on one operand.
+ ASSERT(kSmiTag == 0); // Adjust code below if not the case.
+ // Remove smi tag from the left operand (but keep sign).
+ // Left-hand operand has been copied into answer.
+ __ sar(answer.reg(), kSmiTagSize);
+ // Do multiplication of smis, leaving result in answer.
+ __ imul(answer.reg(), Operand(right->reg()));
+ // Go slow on overflows.
+ deferred->Branch(overflow);
+ // Check for negative zero result. If product is zero, and one
+ // argument is negative, go to slow case. The frame is unchanged
+ // in this block, so local control flow can use a Label rather
+ // than a JumpTarget.
+ Label non_zero_result;
+ __ test(answer.reg(), Operand(answer.reg()));
+ __ j(not_zero, &non_zero_result, taken);
+ __ mov(answer.reg(), left->reg());
+ __ or_(answer.reg(), Operand(right->reg()));
+ deferred->Branch(negative);
+ __ xor_(answer.reg(), Operand(answer.reg())); // Positive 0 is correct.
+ __ bind(&non_zero_result);
+ break;
+ }
+
+ case Token::BIT_OR:
+ __ or_(answer.reg(), Operand(right->reg()));
+ break;
+
+ case Token::BIT_AND:
+ __ and_(answer.reg(), Operand(right->reg()));
+ break;
+
+ case Token::BIT_XOR:
+ __ xor_(answer.reg(), Operand(right->reg()));
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+ deferred->BindExit();
+ left->Unuse();
+ right->Unuse();
frame_->Push(&answer);
}
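Two patterns in the fast path above are worth restating on plain integers: the optimistic add/sub, which operates on the tagged values directly and branches to deferred code on overflow (the deferred classes below then undo the operation before calling the stub), and the multiply's negative-zero fix-up. A minimal illustrative sketch:

    #include <cstdint>

    // Adding two tagged smis directly is valid because kSmiTag == 0; the
    // only failure mode is 32-bit overflow, handled by the deferred path.
    bool OptimisticSmiAdd(int32_t left, int32_t right, int32_t* out) {
      int64_t wide = (int64_t)left + right;
      if (wide != (int32_t)wide) return false;  // deferred->Branch(overflow)
      *out = (int32_t)wide;
      return true;
    }

    // MUL: a zero product with a negative operand means the JS result is
    // -0.0, which only a heap number can represent.
    bool SmiProductNeedsSlowPath(int32_t left, int32_t right, int32_t product) {
      return product == 0 && (left < 0 || right < 0);
    }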
+// Call the appropriate binary operation stub to compute src op value
+// and leave the result in dst.
class DeferredInlineSmiOperation: public DeferredCode {
public:
- DeferredInlineSmiOperation(CodeGenerator* generator,
- Token::Value op,
+ DeferredInlineSmiOperation(Token::Value op,
+ Register dst,
+ Register src,
Smi* value,
OverwriteMode overwrite_mode)
- : DeferredCode(generator),
- op_(op),
+ : op_(op),
+ dst_(dst),
+ src_(src),
value_(value),
overwrite_mode_(overwrite_mode) {
set_comment("[ DeferredInlineSmiOperation");
@@ -1033,31 +1375,35 @@ class DeferredInlineSmiOperation: public DeferredCode {
private:
Token::Value op_;
+ Register dst_;
+ Register src_;
Smi* value_;
OverwriteMode overwrite_mode_;
};
void DeferredInlineSmiOperation::Generate() {
- Result left(generator());
- enter()->Bind(&left);
- generator()->frame()->Push(&left);
- generator()->frame()->Push(value_);
- GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED);
- Result answer = generator()->frame()->CallStub(&igostub, 2);
- exit_.Jump(&answer);
+ __ push(src_);
+ __ push(Immediate(value_));
+ GenericBinaryOpStub stub(op_, overwrite_mode_, SMI_CODE_INLINED);
+ __ CallStub(&stub);
+ if (!dst_.is(eax)) __ mov(dst_, eax);
}
+// Call the appropriate binary operation stub to compute value op src
+// and leave the result in dst.
class DeferredInlineSmiOperationReversed: public DeferredCode {
public:
- DeferredInlineSmiOperationReversed(CodeGenerator* generator,
- Token::Value op,
+ DeferredInlineSmiOperationReversed(Token::Value op,
+ Register dst,
Smi* value,
+ Register src,
OverwriteMode overwrite_mode)
- : DeferredCode(generator),
- op_(op),
+ : op_(op),
+ dst_(dst),
value_(value),
+ src_(src),
overwrite_mode_(overwrite_mode) {
set_comment("[ DeferredInlineSmiOperationReversed");
}
@@ -1066,36 +1412,38 @@ class DeferredInlineSmiOperationReversed: public DeferredCode {
private:
Token::Value op_;
+ Register dst_;
Smi* value_;
+ Register src_;
OverwriteMode overwrite_mode_;
};
void DeferredInlineSmiOperationReversed::Generate() {
- Result right(generator());
- enter()->Bind(&right);
- generator()->frame()->Push(value_);
- generator()->frame()->Push(&right);
+ __ push(Immediate(value_));
+ __ push(src_);
GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED);
- Result answer = generator()->frame()->CallStub(&igostub, 2);
- exit_.Jump(&answer);
+ __ CallStub(&igostub);
+ if (!dst_.is(eax)) __ mov(dst_, eax);
}
+// The result of src + value is in dst. It either overflowed or was not
+// smi tagged. Undo the speculative addition and call the appropriate
+// specialized stub for add. The result is left in dst.
class DeferredInlineSmiAdd: public DeferredCode {
public:
- DeferredInlineSmiAdd(CodeGenerator* generator,
+ DeferredInlineSmiAdd(Register dst,
Smi* value,
OverwriteMode overwrite_mode)
- : DeferredCode(generator),
- value_(value),
- overwrite_mode_(overwrite_mode) {
+ : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
set_comment("[ DeferredInlineSmiAdd");
}
virtual void Generate();
private:
+ Register dst_;
Smi* value_;
OverwriteMode overwrite_mode_;
};
@@ -1103,33 +1451,31 @@ class DeferredInlineSmiAdd: public DeferredCode {
void DeferredInlineSmiAdd::Generate() {
// Undo the optimistic add operation and call the shared stub.
- Result left(generator()); // Initially left + value_.
- enter()->Bind(&left);
- left.ToRegister();
- generator()->frame()->Spill(left.reg());
- __ sub(Operand(left.reg()), Immediate(value_));
- generator()->frame()->Push(&left);
- generator()->frame()->Push(value_);
+ __ sub(Operand(dst_), Immediate(value_));
+ __ push(dst_);
+ __ push(Immediate(value_));
GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
- Result answer = generator()->frame()->CallStub(&igostub, 2);
- exit_.Jump(&answer);
+ __ CallStub(&igostub);
+ if (!dst_.is(eax)) __ mov(dst_, eax);
}
+// The result of value + src is in dst. It either overflowed or was not
+// smi tagged. Undo the speculative addition and call the appropriate
+// specialized stub for add. The result is left in dst.
class DeferredInlineSmiAddReversed: public DeferredCode {
public:
- DeferredInlineSmiAddReversed(CodeGenerator* generator,
+ DeferredInlineSmiAddReversed(Register dst,
Smi* value,
OverwriteMode overwrite_mode)
- : DeferredCode(generator),
- value_(value),
- overwrite_mode_(overwrite_mode) {
+ : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
set_comment("[ DeferredInlineSmiAddReversed");
}
virtual void Generate();
private:
+ Register dst_;
Smi* value_;
OverwriteMode overwrite_mode_;
};
@@ -1137,33 +1483,32 @@ class DeferredInlineSmiAddReversed: public DeferredCode {
void DeferredInlineSmiAddReversed::Generate() {
// Undo the optimistic add operation and call the shared stub.
- Result right(generator()); // Initially value_ + right.
- enter()->Bind(&right);
- right.ToRegister();
- generator()->frame()->Spill(right.reg());
- __ sub(Operand(right.reg()), Immediate(value_));
- generator()->frame()->Push(value_);
- generator()->frame()->Push(&right);
+ __ sub(Operand(dst_), Immediate(value_));
+ __ push(Immediate(value_));
+ __ push(dst_);
GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
- Result answer = generator()->frame()->CallStub(&igostub, 2);
- exit_.Jump(&answer);
+ __ CallStub(&igostub);
+ if (!dst_.is(eax)) __ mov(dst_, eax);
}
+// The result of src - value is in dst. It either overflowed or was not
+// smi tagged. Undo the speculative subtraction and call the
+// appropriate specialized stub for subtract. The result is left in
+// dst.
class DeferredInlineSmiSub: public DeferredCode {
public:
- DeferredInlineSmiSub(CodeGenerator* generator,
+ DeferredInlineSmiSub(Register dst,
Smi* value,
OverwriteMode overwrite_mode)
- : DeferredCode(generator),
- value_(value),
- overwrite_mode_(overwrite_mode) {
+ : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
set_comment("[ DeferredInlineSmiSub");
}
virtual void Generate();
private:
+ Register dst_;
Smi* value_;
OverwriteMode overwrite_mode_;
};
@@ -1171,47 +1516,12 @@ class DeferredInlineSmiSub: public DeferredCode {
void DeferredInlineSmiSub::Generate() {
// Undo the optimistic sub operation and call the shared stub.
- Result left(generator()); // Initially left - value_.
- enter()->Bind(&left);
- left.ToRegister();
- generator()->frame()->Spill(left.reg());
- __ add(Operand(left.reg()), Immediate(value_));
- generator()->frame()->Push(&left);
- generator()->frame()->Push(value_);
+ __ add(Operand(dst_), Immediate(value_));
+ __ push(dst_);
+ __ push(Immediate(value_));
GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
- Result answer = generator()->frame()->CallStub(&igostub, 2);
- exit_.Jump(&answer);
-}
-
-
-class DeferredInlineSmiSubReversed: public DeferredCode {
- public:
- DeferredInlineSmiSubReversed(CodeGenerator* generator,
- Smi* value,
- OverwriteMode overwrite_mode)
- : DeferredCode(generator),
- value_(value),
- overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiSubReversed");
- }
-
- virtual void Generate();
-
- private:
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiSubReversed::Generate() {
- // Call the shared stub.
- Result right(generator());
- enter()->Bind(&right);
- generator()->frame()->Push(value_);
- generator()->frame()->Push(&right);
- GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
- Result answer = generator()->frame()->CallStub(&igostub, 2);
- exit_.Jump(&answer);
+ __ CallStub(&igostub);
+ if (!dst_.is(eax)) __ mov(dst_, eax);
}
@@ -1229,7 +1539,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
// TODO(199): Optimize some special cases of operations involving a
// smi literal (multiply by 2, shift by 0, etc.).
if (IsUnsafeSmi(value)) {
- Result unsafe_operand(value, this);
+ Result unsafe_operand(value);
if (reversed) {
LikelySmiBinaryOperation(op, &unsafe_operand, operand,
overwrite_mode);
@@ -1247,134 +1557,162 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
switch (op) {
case Token::ADD: {
+ operand->ToRegister();
+ frame_->Spill(operand->reg());
+
+ // Optimistically add. Call the specialized add stub if the
+ // result is not a smi or overflows.
DeferredCode* deferred = NULL;
if (reversed) {
- deferred = new DeferredInlineSmiAddReversed(this, smi_value,
+ deferred = new DeferredInlineSmiAddReversed(operand->reg(),
+ smi_value,
overwrite_mode);
} else {
- deferred = new DeferredInlineSmiAdd(this, smi_value, overwrite_mode);
+ deferred = new DeferredInlineSmiAdd(operand->reg(),
+ smi_value,
+ overwrite_mode);
}
- operand->ToRegister();
- frame_->Spill(operand->reg());
__ add(Operand(operand->reg()), Immediate(value));
- deferred->enter()->Branch(overflow, operand, not_taken);
+ deferred->Branch(overflow);
__ test(operand->reg(), Immediate(kSmiTagMask));
- deferred->enter()->Branch(not_zero, operand, not_taken);
- deferred->BindExit(operand);
+ deferred->Branch(not_zero);
+ deferred->BindExit();
frame_->Push(operand);
break;
}
case Token::SUB: {
DeferredCode* deferred = NULL;
- Result answer(this); // Only allocate a new register if reversed.
+ Result answer; // Only allocate a new register if reversed.
if (reversed) {
+ // The reversed case is only hit when the right operand is not a
+ // constant.
+ ASSERT(operand->is_register());
answer = allocator()->Allocate();
ASSERT(answer.is_valid());
- deferred = new DeferredInlineSmiSubReversed(this,
- smi_value,
- overwrite_mode);
__ Set(answer.reg(), Immediate(value));
- // We are in the reversed case so they can't both be Smi constants.
- ASSERT(operand->is_register());
+ deferred = new DeferredInlineSmiOperationReversed(op,
+ answer.reg(),
+ smi_value,
+ operand->reg(),
+ overwrite_mode);
__ sub(answer.reg(), Operand(operand->reg()));
} else {
operand->ToRegister();
frame_->Spill(operand->reg());
- deferred = new DeferredInlineSmiSub(this,
+ answer = *operand;
+ deferred = new DeferredInlineSmiSub(operand->reg(),
smi_value,
overwrite_mode);
__ sub(Operand(operand->reg()), Immediate(value));
- answer = *operand;
}
- deferred->enter()->Branch(overflow, operand, not_taken);
+ deferred->Branch(overflow);
__ test(answer.reg(), Immediate(kSmiTagMask));
- deferred->enter()->Branch(not_zero, operand, not_taken);
+ deferred->Branch(not_zero);
+ deferred->BindExit();
operand->Unuse();
- deferred->BindExit(&answer);
frame_->Push(&answer);
break;
}
- case Token::SAR: {
+ case Token::SAR:
if (reversed) {
- Result constant_operand(value, this);
+ Result constant_operand(value);
LikelySmiBinaryOperation(op, &constant_operand, operand,
overwrite_mode);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
int shift_value = int_value & 0x1f;
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(this, Token::SAR, smi_value,
- overwrite_mode);
operand->ToRegister();
+ frame_->Spill(operand->reg());
+ DeferredInlineSmiOperation* deferred =
+ new DeferredInlineSmiOperation(op,
+ operand->reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
__ test(operand->reg(), Immediate(kSmiTagMask));
- deferred->enter()->Branch(not_zero, operand, not_taken);
+ deferred->Branch(not_zero);
if (shift_value > 0) {
- frame_->Spill(operand->reg());
__ sar(operand->reg(), shift_value);
__ and_(operand->reg(), ~kSmiTagMask);
}
- deferred->BindExit(operand);
+ deferred->BindExit();
frame_->Push(operand);
}
break;
- }
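The 0x1f mask above implements ECMA-262 shift semantics, where the count is taken modulo 32; it also matches what the x86 shift instructions do with a count in cl. A one-line illustration (JsSar is a hypothetical helper):

    #include <cstdint>

    // x >> 33 in JavaScript behaves exactly like x >> 1.
    int32_t JsSar(int32_t value, int32_t count) {
      return value >> (count & 0x1f);
    }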
- case Token::SHR: {
+ case Token::SHR:
if (reversed) {
- Result constant_operand(value, this);
+ Result constant_operand(value);
LikelySmiBinaryOperation(op, &constant_operand, operand,
overwrite_mode);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
int shift_value = int_value & 0x1f;
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(this, Token::SHR, smi_value,
- overwrite_mode);
operand->ToRegister();
- __ test(operand->reg(), Immediate(kSmiTagMask));
- deferred->enter()->Branch(not_zero, operand, not_taken);
Result answer = allocator()->Allocate();
ASSERT(answer.is_valid());
+ DeferredInlineSmiOperation* deferred =
+ new DeferredInlineSmiOperation(op,
+ answer.reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
+ __ test(operand->reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
__ mov(answer.reg(), operand->reg());
__ sar(answer.reg(), kSmiTagSize);
__ shr(answer.reg(), shift_value);
// A negative Smi shifted right by two is in the positive Smi range.
if (shift_value < 2) {
__ test(answer.reg(), Immediate(0xc0000000));
- deferred->enter()->Branch(not_zero, operand, not_taken);
+ deferred->Branch(not_zero);
}
operand->Unuse();
ASSERT(kSmiTagSize == times_2); // Adjust the code if not true.
__ lea(answer.reg(),
Operand(answer.reg(), answer.reg(), times_1, kSmiTag));
- deferred->BindExit(&answer);
+ deferred->BindExit();
frame_->Push(&answer);
}
break;
- }
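Why only shift counts 0 and 1 are range-checked above: an untagged smi has 31 significant bits, so a logical right shift by two or more clears both of the bits that the 0xc0000000 test inspects. Illustrative sketch:

    #include <cstdint>

    // Untagged smi x shifted right logically by a masked constant count.
    bool SmiShrOk(int32_t x, int count, int32_t* out) {
      uint32_t r = (uint32_t)x >> count;
      if (count < 2 && (r & 0xc0000000u) != 0) return false;  // slow path
      *out = (int32_t)r;
      return true;
    }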
- case Token::SHL: {
+ case Token::SHL:
if (reversed) {
- Result constant_operand(value, this);
+ Result constant_operand(value);
LikelySmiBinaryOperation(op, &constant_operand, operand,
overwrite_mode);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
int shift_value = int_value & 0x1f;
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(this, Token::SHL, smi_value,
- overwrite_mode);
operand->ToRegister();
- __ test(operand->reg(), Immediate(kSmiTagMask));
- deferred->enter()->Branch(not_zero, operand, not_taken);
- if (shift_value != 0) {
+ if (shift_value == 0) {
+ DeferredInlineSmiOperation* deferred =
+ new DeferredInlineSmiOperation(op,
+ operand->reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
+ __ test(operand->reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ deferred->BindExit();
+ frame_->Push(operand);
+ } else {
+ // Use a fresh temporary for nonzero shift values.
Result answer = allocator()->Allocate();
ASSERT(answer.is_valid());
+ DeferredInlineSmiOperation* deferred =
+ new DeferredInlineSmiOperation(op,
+ answer.reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
+ __ test(operand->reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
__ mov(answer.reg(), operand->reg());
ASSERT(kSmiTag == 0); // adjust code if not the case
// We do no shifts, only the Smi conversion, if shift_value is 1.
@@ -1382,35 +1720,37 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
__ shl(answer.reg(), shift_value - 1);
}
// Convert int result to Smi, checking that it is in smi range.
- ASSERT(kSmiTagSize == times_2); // adjust code if not the case
+ ASSERT(kSmiTagSize == 1); // adjust code if not the case
__ add(answer.reg(), Operand(answer.reg()));
- deferred->enter()->Branch(overflow, operand, not_taken);
+ deferred->Branch(overflow);
+ deferred->BindExit();
operand->Unuse();
- deferred->BindExit(&answer);
frame_->Push(&answer);
- } else {
- deferred->BindExit(operand);
- frame_->Push(operand);
}
}
break;
- }
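The final add(answer, answer) above both applies the smi tag (a left shift by kSmiTagSize == 1) and doubles as the range check: the add's overflow flag fires exactly when the result leaves the smi range. Sketch of the same step (TagSmi is illustrative):

    #include <cstdint>

    bool TagSmi(int32_t untagged, int32_t* tagged) {
      // r + r overflows a 32-bit int exactly when r lies outside
      // [-2^30, 2^30 - 1], i.e. when it is not a valid smi.
      if (untagged > 0x3fffffff || untagged < -0x40000000) return false;
      *tagged = untagged + untagged;
      return true;
    }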
case Token::BIT_OR:
case Token::BIT_XOR:
case Token::BIT_AND: {
+ operand->ToRegister();
+ frame_->Spill(operand->reg());
DeferredCode* deferred = NULL;
if (reversed) {
- deferred = new DeferredInlineSmiOperationReversed(this, op, smi_value,
+ deferred = new DeferredInlineSmiOperationReversed(op,
+ operand->reg(),
+ smi_value,
+ operand->reg(),
overwrite_mode);
} else {
- deferred = new DeferredInlineSmiOperation(this, op, smi_value,
+ deferred = new DeferredInlineSmiOperation(op,
+ operand->reg(),
+ operand->reg(),
+ smi_value,
overwrite_mode);
}
- operand->ToRegister();
__ test(operand->reg(), Immediate(kSmiTagMask));
- deferred->enter()->Branch(not_zero, operand, not_taken);
- frame_->Spill(operand->reg());
+ deferred->Branch(not_zero);
if (op == Token::BIT_AND) {
__ and_(Operand(operand->reg()), Immediate(value));
} else if (op == Token::BIT_XOR) {
@@ -1423,13 +1763,13 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
__ or_(Operand(operand->reg()), Immediate(value));
}
}
- deferred->BindExit(operand);
+ deferred->BindExit();
frame_->Push(operand);
break;
}
default: {
- Result constant_operand(value, this);
+ Result constant_operand(value);
if (reversed) {
LikelySmiBinaryOperation(op, &constant_operand, operand,
overwrite_mode);
@@ -1478,8 +1818,8 @@ void CodeGenerator::Comparison(Condition cc,
// Strict only makes sense for equality comparisons.
ASSERT(!strict || cc == equal);
- Result left_side(this);
- Result right_side(this);
+ Result left_side;
+ Result right_side;
// Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
if (cc == greater || cc == less_equal) {
cc = ReverseCondition(cc);
@@ -1534,7 +1874,7 @@ void CodeGenerator::Comparison(Condition cc,
// where both sides are Smis.
left_side.ToRegister();
ASSERT(left_side.is_valid());
- JumpTarget is_smi(this);
+ JumpTarget is_smi;
__ test(left_side.reg(), Immediate(kSmiTagMask));
is_smi.Branch(zero, &left_side, &right_side, taken);
@@ -1605,7 +1945,7 @@ void CodeGenerator::Comparison(Condition cc,
(right_side.is_constant() && !right_side.handle()->IsSmi());
left_side.ToRegister();
right_side.ToRegister();
- JumpTarget is_smi(this);
+ JumpTarget is_smi;
if (!known_non_smi) {
// Check for the smi case.
Result temp = allocator_->Allocate();
@@ -1645,12 +1985,14 @@ void CodeGenerator::Comparison(Condition cc,
class CallFunctionStub: public CodeStub {
public:
- explicit CallFunctionStub(int argc) : argc_(argc) { }
+ CallFunctionStub(int argc, InLoopFlag in_loop)
+ : argc_(argc), in_loop_(in_loop) { }
void Generate(MacroAssembler* masm);
private:
int argc_;
+ InLoopFlag in_loop_;
#ifdef DEBUG
void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
@@ -1658,6 +2000,7 @@ class CallFunctionStub: public CodeStub {
Major MajorKey() { return CallFunction; }
int MinorKey() { return argc_; }
+ InLoopFlag InLoop() { return in_loop_; }
};
@@ -1675,7 +2018,8 @@ void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
CodeForSourcePosition(position);
// Use the shared code stub to call the function.
- CallFunctionStub call_function(arg_count);
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub call_function(arg_count, in_loop);
Result answer = frame_->CallStub(&call_function, arg_count + 1);
// Restore context and replace function on the stack with the
// result of the stub invocation.
@@ -1686,8 +2030,7 @@ void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
class DeferredStackCheck: public DeferredCode {
public:
- explicit DeferredStackCheck(CodeGenerator* generator)
- : DeferredCode(generator) {
+ DeferredStackCheck() {
set_comment("[ DeferredStackCheck");
}
@@ -1696,21 +2039,18 @@ class DeferredStackCheck: public DeferredCode {
void DeferredStackCheck::Generate() {
- enter()->Bind();
StackCheckStub stub;
- Result ignored = generator()->frame()->CallStub(&stub, 0);
- ignored.Unuse();
- exit_.Jump();
+ __ CallStub(&stub);
}
void CodeGenerator::CheckStack() {
if (FLAG_check_stack) {
- DeferredStackCheck* deferred = new DeferredStackCheck(this);
+ DeferredStackCheck* deferred = new DeferredStackCheck;
ExternalReference stack_guard_limit =
ExternalReference::address_of_stack_guard_limit();
__ cmp(esp, Operand::StaticVariable(stack_guard_limit));
- deferred->enter()->Branch(below, not_taken);
+ deferred->Branch(below);
deferred->BindExit();
}
}
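CheckStack reduces the common case to a single compare of esp against a guard limit kept at a known address; only the unlikely "below" branch reaches the deferred StackCheckStub call. In plain terms (illustrative helper):

    #include <cstdint>

    // True if execution may continue inline; false takes the deferred
    // runtime stack-guard path.
    bool StackOk(uintptr_t esp, uintptr_t guard_limit) {
      return esp >= guard_limit;  // cmp esp, limit; branch if below
    }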
@@ -1750,7 +2090,7 @@ void CodeGenerator::VisitBlock(Block* node) {
ASSERT(!in_spilled_code());
Comment cmnt(masm_, "[ Block");
CodeForStatementPosition(node);
- node->break_target()->Initialize(this);
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
VisitStatements(node->statements());
if (node->break_target()->is_linked()) {
node->break_target()->Bind();
@@ -1760,13 +2100,14 @@ void CodeGenerator::VisitBlock(Block* node) {
void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- frame_->Push(pairs);
-
- // Duplicate the context register.
- Result context(esi, this);
- frame_->Push(&context);
-
- frame_->Push(Smi::FromInt(is_eval() ? 1 : 0));
+ // Call the runtime to declare the globals. The inevitable call
+ // will sync frame elements to memory anyway, so we do it eagerly to
+ // allow us to push the arguments directly into place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+
+ frame_->EmitPush(Immediate(pairs));
+ frame_->EmitPush(esi); // The context is the second argument.
+ frame_->EmitPush(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
// Return value is ignored.
}
@@ -1786,24 +2127,25 @@ void CodeGenerator::VisitDeclaration(Declaration* node) {
// Variables with a "LOOKUP" slot were introduced as non-locals
// during variable resolution and must have mode DYNAMIC.
ASSERT(var->is_dynamic());
- // For now, just do a runtime call. Duplicate the context register.
- Result context(esi, this);
- frame_->Push(&context);
- frame_->Push(var->name());
+ // For now, just do a runtime call. Sync the virtual frame eagerly
+ // so we can simply push the arguments into place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+ frame_->EmitPush(esi);
+ frame_->EmitPush(Immediate(var->name()));
// Declaration nodes are always introduced in one of two modes.
ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
- frame_->Push(Smi::FromInt(attr));
+ frame_->EmitPush(Immediate(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
if (node->mode() == Variable::CONST) {
- frame_->Push(Factory::the_hole_value());
+ frame_->EmitPush(Immediate(Factory::the_hole_value()));
} else if (node->fun() != NULL) {
Load(node->fun());
} else {
- frame_->Push(Smi::FromInt(0)); // no initial value!
+ frame_->EmitPush(Immediate(Smi::FromInt(0))); // no initial value!
}
Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
// Ignore the return value (declarations are statements).
@@ -1864,10 +2206,10 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
bool has_else_stm = node->HasElseStatement();
CodeForStatementPosition(node);
- JumpTarget exit(this);
+ JumpTarget exit;
if (has_then_stm && has_else_stm) {
- JumpTarget then(this);
- JumpTarget else_(this);
+ JumpTarget then;
+ JumpTarget else_;
ControlDestination dest(&then, &else_, true);
LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
@@ -1894,7 +2236,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
} else if (has_then_stm) {
ASSERT(!has_else_stm);
- JumpTarget then(this);
+ JumpTarget then;
ControlDestination dest(&then, &exit, true);
LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
@@ -1914,7 +2256,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
} else if (has_else_stm) {
ASSERT(!has_then_stm);
- JumpTarget else_(this);
+ JumpTarget else_;
ControlDestination dest(&exit, &else_, false);
LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
@@ -2026,7 +2368,7 @@ void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
Comment cmnt(masm_, "[ WithEnterStatement");
CodeForStatementPosition(node);
Load(node->expression());
- Result context(this);
+ Result context;
if (node->is_catch_block()) {
context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
} else {
@@ -2081,8 +2423,8 @@ void CodeGenerator::GenerateFastCaseSwitchJumpTable(
// placeholders, and fill in the addresses after the labels have been
// bound.
- JumpTarget setup_default(this);
- JumpTarget is_smi(this);
+ JumpTarget setup_default;
+ JumpTarget is_smi;
// A non-null default label pointer indicates a default case among
// the case labels. Otherwise we use the break target as a
@@ -2127,7 +2469,7 @@ void CodeGenerator::GenerateFastCaseSwitchJumpTable(
// frame of the correct height can be merged to). Keep a copy to
// restore at the start of every label. Create a jump target and
// bind it to set its entry frame properly.
- JumpTarget entry_target(this, JumpTarget::BIDIRECTIONAL);
+ JumpTarget entry_target(JumpTarget::BIDIRECTIONAL);
entry_target.Bind(&smi_value);
VirtualFrame* start_frame = new VirtualFrame(frame_);
@@ -2177,8 +2519,6 @@ void CodeGenerator::GenerateFastCaseSwitchJumpTable(
__ WriteInternalReference(entry_pos, *case_targets[i]);
}
}
-
- delete start_frame;
}
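The table built above is the classic dense-switch dispatch: bounds-check the case value, then jump through an array of code addresses, with anything out of range going to the default. The generated code stores internal references rather than function pointers, so the sketch below is only the shape, with illustrative names:

    // table holds one target per consecutive case value starting at min.
    void Dispatch(int value, int min, int len,
                  void (*const table[])(), void (*fallback)()) {
      unsigned index = (unsigned)(value - min);
      if (index >= (unsigned)len) {
        fallback();  // default case or out-of-range value
        return;
      }
      table[index]();
    }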
@@ -2186,7 +2526,7 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
ASSERT(!in_spilled_code());
Comment cmnt(masm_, "[ SwitchStatement");
CodeForStatementPosition(node);
- node->break_target()->Initialize(this);
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
// Compile the switch value.
Load(node->tag());
@@ -2199,7 +2539,7 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
int length = cases->length();
CaseClause* default_clause = NULL;
- JumpTarget next_test(this);
+ JumpTarget next_test;
// Compile the case label expressions and comparisons. Exit early
// if a comparison is unconditionally true. The target next_test is
// bound before the loop in order to indicate control flow to the
@@ -2207,7 +2547,6 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
next_test.Bind();
for (int i = 0; i < length && !next_test.is_unused(); i++) {
CaseClause* clause = cases->at(i);
- clause->body_target()->Initialize(this);
// The default is not a test, but remember it for later.
if (clause->is_default()) {
default_clause = clause;
@@ -2278,7 +2617,7 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
if (clause->is_default()) {
clause->body_target()->Bind();
} else {
- JumpTarget body(this);
+ JumpTarget body;
body.Jump();
clause->body_target()->Bind();
frame_->Drop();
@@ -2316,7 +2655,7 @@ void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
ASSERT(!in_spilled_code());
Comment cmnt(masm_, "[ LoopStatement");
CodeForStatementPosition(node);
- node->break_target()->Initialize(this);
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
// Simple condition analysis. ALWAYS_TRUE and ALWAYS_FALSE represent a
// known result for the test expression, with no side effects.
@@ -2337,21 +2676,21 @@ void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
switch (node->type()) {
case LoopStatement::DO_LOOP: {
- JumpTarget body(this, JumpTarget::BIDIRECTIONAL);
+ JumpTarget body(JumpTarget::BIDIRECTIONAL);
IncrementLoopNesting();
// Label the top of the loop for the backward jump if necessary.
if (info == ALWAYS_TRUE) {
// Use the continue target.
- node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
node->continue_target()->Bind();
} else if (info == ALWAYS_FALSE) {
// No need to label it.
- node->continue_target()->Initialize(this);
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
} else {
// Continue is the test, so use the backward body target.
ASSERT(info == DONT_KNOW);
- node->continue_target()->Initialize(this);
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
body.Bind();
}
@@ -2410,27 +2749,25 @@ void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
JumpTarget body;
if (test_at_bottom) {
- body.Initialize(this, JumpTarget::BIDIRECTIONAL);
- } else {
- body.Initialize(this);
+ body.set_direction(JumpTarget::BIDIRECTIONAL);
}
// Based on the condition analysis, compile the test as necessary.
if (info == ALWAYS_TRUE) {
// We will not compile the test expression. Label the top of
// the loop with the continue target.
- node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
node->continue_target()->Bind();
} else {
ASSERT(info == DONT_KNOW); // ALWAYS_FALSE cannot reach here.
if (test_at_bottom) {
// Continue is the test at the bottom, no need to label the
// test at the top. The body is a backward target.
- node->continue_target()->Initialize(this);
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
} else {
// Label the test at the top as the continue target. The
// body is a forward-only target.
- node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
node->continue_target()->Bind();
}
// Compile the test with the body as the true target and
@@ -2513,15 +2850,13 @@ void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
// Target for backward edge if no test at the bottom, otherwise
// unused.
- JumpTarget loop(this, JumpTarget::BIDIRECTIONAL);
+ JumpTarget loop(JumpTarget::BIDIRECTIONAL);
// Target for backward edge if there is a test at the bottom,
// otherwise used as target for test at the top.
JumpTarget body;
if (test_at_bottom) {
- body.Initialize(this, JumpTarget::BIDIRECTIONAL);
- } else {
- body.Initialize(this);
+ body.set_direction(JumpTarget::BIDIRECTIONAL);
}
// Based on the condition analysis, compile the test as necessary.
@@ -2530,11 +2865,11 @@ void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
// the loop.
if (node->next() == NULL) {
// Use the continue target if there is no update expression.
- node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
node->continue_target()->Bind();
} else {
// Otherwise use the backward loop target.
- node->continue_target()->Initialize(this);
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
loop.Bind();
}
} else {
@@ -2542,16 +2877,16 @@ void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
if (test_at_bottom) {
// Continue is either the update expression or the test at
// the bottom, no need to label the test at the top.
- node->continue_target()->Initialize(this);
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
} else if (node->next() == NULL) {
// We are not recompiling the test at the bottom and there
// is no update expression.
- node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
node->continue_target()->Bind();
} else {
// We are not recompiling the test at the bottom and there
// is an update expression.
- node->continue_target()->Initialize(this);
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
loop.Bind();
}
@@ -2651,16 +2986,16 @@ void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
void CodeGenerator::VisitForInStatement(ForInStatement* node) {
ASSERT(!in_spilled_code());
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ ForInStatement");
CodeForStatementPosition(node);
- JumpTarget primitive(this);
- JumpTarget jsobject(this);
- JumpTarget fixed_array(this);
- JumpTarget entry(this, JumpTarget::BIDIRECTIONAL);
- JumpTarget end_del_check(this);
- JumpTarget exit(this);
+ JumpTarget primitive;
+ JumpTarget jsobject;
+ JumpTarget fixed_array;
+ JumpTarget entry(JumpTarget::BIDIRECTIONAL);
+ JumpTarget end_del_check;
+ JumpTarget exit;
// Get the object to enumerate over (converted to JSObject).
LoadAndSpill(node->enumerable());
@@ -2745,8 +3080,8 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
entry.Bind();
// Grab the current frame's height for the break and continue
// targets only after all the state is pushed on the frame.
- node->break_target()->Initialize(this);
- node->continue_target()->Initialize(this);
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
__ mov(eax, frame_->ElementAt(0)); // load the current count
__ cmp(eax, frame_->ElementAt(1)); // compare to the array length
@@ -2841,12 +3176,12 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
void CodeGenerator::VisitTryCatch(TryCatch* node) {
ASSERT(!in_spilled_code());
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ TryCatch");
CodeForStatementPosition(node);
- JumpTarget try_block(this);
- JumpTarget exit(this);
+ JumpTarget try_block;
+ JumpTarget exit;
try_block.Call();
// --- Catch block ---
@@ -2937,7 +3272,7 @@ void CodeGenerator::VisitTryCatch(TryCatch* node) {
// Generate unlink code for the (formerly) shadowing targets that
// have been jumped to. Deallocate each shadow target.
- Result return_value(this);
+ Result return_value;
for (int i = 0; i < shadows.length(); i++) {
if (shadows[i]->is_linked()) {
// Unlink from try chain; be careful not to destroy the TOS if
@@ -2972,7 +3307,6 @@ void CodeGenerator::VisitTryCatch(TryCatch* node) {
shadows[i]->other_target()->Jump();
}
}
- delete shadows[i];
}
exit.Bind();
@@ -2981,7 +3315,7 @@ void CodeGenerator::VisitTryCatch(TryCatch* node) {
void CodeGenerator::VisitTryFinally(TryFinally* node) {
ASSERT(!in_spilled_code());
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ TryFinally");
CodeForStatementPosition(node);
@@ -2990,8 +3324,8 @@ void CodeGenerator::VisitTryFinally(TryFinally* node) {
// break/continue from within the try block.
enum { FALLING, THROWING, JUMPING };
- JumpTarget try_block(this);
- JumpTarget finally_block(this);
+ JumpTarget try_block;
+ JumpTarget finally_block;
try_block.Call();
@@ -3070,7 +3404,7 @@ void CodeGenerator::VisitTryFinally(TryFinally* node) {
// on the virtual frame. We must preserve it until it is
// pushed.
if (i == kReturnShadowIndex) {
- Result return_value(this);
+ Result return_value;
shadows[i]->Bind(&return_value);
return_value.ToRegister(eax);
} else {
@@ -3143,7 +3477,7 @@ void CodeGenerator::VisitTryFinally(TryFinally* node) {
} else {
// Branch around the preparation for return which may emit
// code.
- JumpTarget skip(this);
+ JumpTarget skip;
skip.Branch(not_equal);
frame_->PrepareForReturn();
original->Jump(&return_value);
@@ -3153,12 +3487,11 @@ void CodeGenerator::VisitTryFinally(TryFinally* node) {
original->Branch(equal);
}
}
- delete shadows[i];
}
if (has_valid_frame()) {
// Check if we need to rethrow the exception.
- JumpTarget exit(this);
+ JumpTarget exit;
__ cmp(Operand(ecx), Immediate(Smi::FromInt(THROWING)));
exit.Branch(not_equal);
@@ -3186,13 +3519,18 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
+ // Call the runtime to instantiate the function boilerplate object.
+ // The inevitable call will sync frame elements to memory anyway, so
+ // we do it eagerly to allow us to push the arguments directly into
+ // place.
ASSERT(boilerplate->IsBoilerplate());
+ frame_->SyncRange(0, frame_->element_count() - 1);
// Push the boilerplate on the stack.
- frame_->Push(boilerplate);
+ frame_->EmitPush(Immediate(boilerplate));
// Create a new closure.
- frame_->Push(esi);
+ frame_->EmitPush(esi);
Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
frame_->Push(&result);
}
@@ -3218,9 +3556,9 @@ void CodeGenerator::VisitFunctionBoilerplateLiteral(
void CodeGenerator::VisitConditional(Conditional* node) {
Comment cmnt(masm_, "[ Conditional");
- JumpTarget then(this);
- JumpTarget else_(this);
- JumpTarget exit(this);
+ JumpTarget then;
+ JumpTarget else_;
+ JumpTarget exit;
ControlDestination dest(&then, &else_, true);
LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
@@ -3252,9 +3590,9 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
if (slot->type() == Slot::LOOKUP) {
ASSERT(slot->var()->is_dynamic());
- JumpTarget slow(this);
- JumpTarget done(this);
- Result value(this);
+ JumpTarget slow;
+ JumpTarget done;
+ Result value;
// Generate fast-case code for variables that might be shadowed by
// eval-introduced variables. Eval is used a lot without
@@ -3298,8 +3636,12 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
}
slow.Bind();
- frame_->Push(esi);
- frame_->Push(slot->var()->name());
+ // A runtime call is inevitable. We eagerly sync frame elements
+ // to memory so that we can push the arguments directly into place
+ // on top of the frame.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+ frame_->EmitPush(esi);
+ frame_->EmitPush(Immediate(slot->var()->name()));
if (typeof_state == INSIDE_TYPEOF) {
value =
frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
@@ -3317,9 +3659,9 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
//
// We currently spill the virtual frame because constants use the
// potentially unsafe direct-frame access of SlotOperand.
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Load const");
- JumpTarget exit(this);
+ JumpTarget exit;
__ mov(ecx, SlotOperand(slot, ecx));
__ cmp(ecx, Factory::the_hole_value());
exit.Branch(not_equal);
@@ -3354,7 +3696,7 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
JumpTarget* slow) {
// Check that no extension objects have been created by calls to
// eval from the current scope to the global scope.
- Result context(esi, this);
+ Register context = esi;
Result tmp = allocator_->Allocate();
ASSERT(tmp.is_valid()); // All non-reserved registers were available.
@@ -3363,14 +3705,14 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
if (s->num_heap_slots() > 0) {
if (s->calls_eval()) {
// Check that extension is NULL.
- __ cmp(ContextOperand(context.reg(), Context::EXTENSION_INDEX),
+ __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
slow->Branch(not_equal, not_taken);
}
// Load next context in chain.
- __ mov(tmp.reg(), ContextOperand(context.reg(), Context::CLOSURE_INDEX));
+ __ mov(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
__ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
- context = tmp;
+ context = tmp.reg();
}
// If no outer scope calls eval, we do not need to check more
// context extensions. If we have reached an eval scope, we check
@@ -3383,8 +3725,8 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
// Loop up the context chain. There is no frame effect so it is
// safe to use raw labels here.
Label next, fast;
- if (!context.reg().is(tmp.reg())) {
- __ mov(tmp.reg(), context.reg());
+ if (!context.is(tmp.reg())) {
+ __ mov(tmp.reg(), context);
}
__ bind(&next);
// Terminate at global context.
@@ -3400,7 +3742,6 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
__ jmp(&next);
__ bind(&fast);
}
- context.Unuse();
tmp.Unuse();
// All extension objects were empty and it is safe to use a global
@@ -3425,11 +3766,15 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
if (slot->type() == Slot::LOOKUP) {
ASSERT(slot->var()->is_dynamic());
- // For now, just do a runtime call.
- frame_->Push(esi);
- frame_->Push(slot->var()->name());
+ // For now, just do a runtime call. Since the call is inevitable,
+ // we eagerly sync the virtual frame so we can directly push the
+ // arguments into place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
- Result value(this);
+ frame_->EmitPush(esi);
+ frame_->EmitPush(Immediate(slot->var()->name()));
+
+ Result value;
if (init_state == CONST_INIT) {
// Same as the case for a normal store, but ignores attribute
// (e.g. READ_ONLY) of context slot so that we can initialize const
@@ -3457,7 +3802,7 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
} else {
ASSERT(!slot->var()->is_dynamic());
- JumpTarget exit(this);
+ JumpTarget exit;
if (init_state == CONST_INIT) {
ASSERT(slot->var()->mode() == Variable::CONST);
// Only the first const initialization must be executed (the slot
@@ -3467,7 +3812,7 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
// We spill the frame in the code below because the direct-frame
// access of SlotOperand is potentially unsafe with an unspilled
// frame.
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Init const");
__ mov(ecx, SlotOperand(slot, ecx));
__ cmp(ecx, Factory::the_hole_value());
@@ -3557,44 +3902,45 @@ bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
}
+// Materialize the regexp literal 'node' in the literals array
+// 'literals' of the function. Leave the regexp boilerplate in
+// 'boilerplate'.
class DeferredRegExpLiteral: public DeferredCode {
public:
- DeferredRegExpLiteral(CodeGenerator* generator, RegExpLiteral* node)
- : DeferredCode(generator), node_(node) {
+ DeferredRegExpLiteral(Register boilerplate,
+ Register literals,
+ RegExpLiteral* node)
+ : boilerplate_(boilerplate), literals_(literals), node_(node) {
set_comment("[ DeferredRegExpLiteral");
}
- virtual void Generate();
+ void Generate();
private:
+ Register boilerplate_;
+ Register literals_;
RegExpLiteral* node_;
};
void DeferredRegExpLiteral::Generate() {
- Result literals(generator());
- enter()->Bind(&literals);
// Since the entry is undefined we call the runtime system to
// compute the literal.
-
- VirtualFrame* frame = generator()->frame();
// Literal array (0).
- frame->Push(&literals);
+ __ push(literals_);
// Literal index (1).
- frame->Push(Smi::FromInt(node_->literal_index()));
+ __ push(Immediate(Smi::FromInt(node_->literal_index())));
// RegExp pattern (2).
- frame->Push(node_->pattern());
+ __ push(Immediate(node_->pattern()));
// RegExp flags (3).
- frame->Push(node_->flags());
- Result boilerplate =
- frame->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- exit_.Jump(&boilerplate);
+ __ push(Immediate(node_->flags()));
+ __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
}
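This rewrite is the template for every deferred-code change in this patch: the old protocol threaded ref-counted Results through enter()/exit() jump targets, while the new one captures plain registers at construction time, emits raw pushes plus the runtime call in Generate(), and routes the eax result into the requested register. A condensed, hypothetical instance of the pattern (the class name and its slow-path call are illustrative, not part of this patch):

    // Minimal shape of the new deferred-code protocol (sketch).
    class DeferredExample: public DeferredCode {   // hypothetical name
     public:
      DeferredExample(Register dst, Register arg)
          : dst_(dst), arg_(arg) {
        set_comment("[ DeferredExample");
      }
      void Generate() {
        __ push(arg_);                            // arguments pushed raw
        __ push(Immediate(Smi::FromInt(1)));
        __ CallRuntime(Runtime::kNumberAdd, 2);   // any slow-path call
        if (!dst_.is(eax)) __ mov(dst_, eax);     // result to fast path's reg
      }
     private:
      Register dst_;
      Register arg_;
    };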
void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
Comment cmnt(masm_, "[ RegExp Literal");
- DeferredRegExpLiteral* deferred = new DeferredRegExpLiteral(this, node);
// Retrieve the literals array and check the allocated entry. Begin
// with a writable copy of the function of this activation in a
@@ -3609,67 +3955,63 @@ void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
// Load the literal at the ast saved index.
- int literal_offset =
- FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
Result boilerplate = allocator_->Allocate();
ASSERT(boilerplate.is_valid());
+ int literal_offset =
+ FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
__ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
// Check whether we need to materialize the RegExp object. If so,
// jump to the deferred code passing the literals array.
+ DeferredRegExpLiteral* deferred =
+ new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
__ cmp(boilerplate.reg(), Factory::undefined_value());
- deferred->enter()->Branch(equal, &literals, not_taken);
-
+ deferred->Branch(equal);
+ deferred->BindExit();
literals.Unuse();
- // The deferred code returns the boilerplate object.
- deferred->BindExit(&boilerplate);
// Push the boilerplate object.
frame_->Push(&boilerplate);
}
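The fast path relies on a sentinel: the literals-array slot holds undefined until the first execution, so only the first hit pays for the runtime materialization and later hits load the cached boilerplate directly. A rough C++ analogy of that caching (a hypothetical helper, not code from this patch):

    #include <cstddef>

    // Analogy: lazily materialize and cache a per-function literal slot.
    struct LiteralsArray {
      void* slots[16];                       // stand-in for the FixedArray
    };

    void* GetBoilerplate(LiteralsArray* literals, int index,
                         void* (*materialize)(LiteralsArray*, int)) {
      void* entry = literals->slots[index];
      if (entry == NULL) {                   // 'undefined' sentinel in V8
        entry = materialize(literals, index);   // the deferred runtime call
        literals->slots[index] = entry;         // cached for later runs
      }
      return entry;
    }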
-// This deferred code stub will be used for creating the boilerplate
-// by calling Runtime_CreateObjectLiteral.
-// Each created boilerplate is stored in the JSFunction and they are
-// therefore context dependent.
+// Materialize the object literal 'node' in the literals array
+// 'literals' of the function. Leave the object boilerplate in
+// 'boilerplate'.
class DeferredObjectLiteral: public DeferredCode {
public:
- DeferredObjectLiteral(CodeGenerator* generator,
+ DeferredObjectLiteral(Register boilerplate,
+ Register literals,
ObjectLiteral* node)
- : DeferredCode(generator), node_(node) {
+ : boilerplate_(boilerplate), literals_(literals), node_(node) {
set_comment("[ DeferredObjectLiteral");
}
- virtual void Generate();
+ void Generate();
private:
+ Register boilerplate_;
+ Register literals_;
ObjectLiteral* node_;
};
void DeferredObjectLiteral::Generate() {
- Result literals(generator());
- enter()->Bind(&literals);
// Since the entry is undefined we call the runtime system to
// compute the literal.
-
- VirtualFrame* frame = generator()->frame();
// Literal array (0).
- frame->Push(&literals);
+ __ push(literals_);
// Literal index (1).
- frame->Push(Smi::FromInt(node_->literal_index()));
+ __ push(Immediate(Smi::FromInt(node_->literal_index())));
// Constant properties (2).
- frame->Push(node_->constant_properties());
- Result boilerplate =
- frame->CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
- exit_.Jump(&boilerplate);
+ __ push(Immediate(node_->constant_properties()));
+ __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+ if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
}
void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
Comment cmnt(masm_, "[ ObjectLiteral");
- DeferredObjectLiteral* deferred = new DeferredObjectLiteral(this, node);
// Retrieve the literals array and check the allocated entry. Begin
// with a writable copy of the function of this activation in a
@@ -3684,20 +4026,20 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
// Load the literal at the ast saved index.
- int literal_offset =
- FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
Result boilerplate = allocator_->Allocate();
ASSERT(boilerplate.is_valid());
+ int literal_offset =
+ FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
__ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
// Check whether we need to materialize the object literal boilerplate.
// If so, jump to the deferred code passing the literals array.
+ DeferredObjectLiteral* deferred =
+ new DeferredObjectLiteral(boilerplate.reg(), literals.reg(), node);
__ cmp(boilerplate.reg(), Factory::undefined_value());
- deferred->enter()->Branch(equal, &literals, not_taken);
-
+ deferred->Branch(equal);
+ deferred->BindExit();
literals.Unuse();
- // The deferred code returns the boilerplate object.
- deferred->BindExit(&boilerplate);
// Push the boilerplate object.
frame_->Push(&boilerplate);
@@ -3767,47 +4109,42 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
}
-// This deferred code stub will be used for creating the boilerplate
-// by calling Runtime_CreateArrayLiteralBoilerplate.
-// Each created boilerplate is stored in the JSFunction and they are
-// therefore context dependent.
+// Materialize the array literal 'node' in the literals array 'literals'
+// of the function. Leave the array boilerplate in 'boilerplate'.
class DeferredArrayLiteral: public DeferredCode {
public:
- DeferredArrayLiteral(CodeGenerator* generator,
+ DeferredArrayLiteral(Register boilerplate,
+ Register literals,
ArrayLiteral* node)
- : DeferredCode(generator), node_(node) {
+ : boilerplate_(boilerplate), literals_(literals), node_(node) {
set_comment("[ DeferredArrayLiteral");
}
- virtual void Generate();
+ void Generate();
private:
+ Register boilerplate_;
+ Register literals_;
ArrayLiteral* node_;
};
void DeferredArrayLiteral::Generate() {
- Result literals(generator());
- enter()->Bind(&literals);
// Since the entry is undefined we call the runtime system to
// compute the literal.
-
- VirtualFrame* frame = generator()->frame();
// Literal array (0).
- frame->Push(&literals);
+ __ push(literals_);
// Literal index (1).
- frame->Push(Smi::FromInt(node_->literal_index()));
+ __ push(Immediate(Smi::FromInt(node_->literal_index())));
// Constant elements (2).
- frame->Push(node_->literals());
- Result boilerplate =
- frame->CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
- exit_.Jump(&boilerplate);
+ __ push(Immediate(node_->literals()));
+ __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
+ if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
}
void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
Comment cmnt(masm_, "[ ArrayLiteral");
- DeferredArrayLiteral* deferred = new DeferredArrayLiteral(this, node);
// Retrieve the literals array and check the allocated entry. Begin
// with a writable copy of the function of this activation in a
@@ -3822,24 +4159,23 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
// Load the literal at the ast saved index.
- int literal_offset =
- FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
Result boilerplate = allocator_->Allocate();
ASSERT(boilerplate.is_valid());
+ int literal_offset =
+ FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
__ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
// Check whether we need to materialize the object literal boilerplate.
// If so, jump to the deferred code passing the literals array.
+ DeferredArrayLiteral* deferred =
+ new DeferredArrayLiteral(boilerplate.reg(), literals.reg(), node);
__ cmp(boilerplate.reg(), Factory::undefined_value());
- deferred->enter()->Branch(equal, &literals, not_taken);
-
+ deferred->Branch(equal);
+ deferred->BindExit();
literals.Unuse();
- // The deferred code returns the boilerplate object.
- deferred->BindExit(&boilerplate);
- // Push the resulting array literal on the stack.
+ // Push the resulting array literal boilerplate on the stack.
frame_->Push(&boilerplate);
-
// Clone the boilerplate object.
Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
if (node->depth() == 1) {
@@ -4058,15 +4394,23 @@ void CodeGenerator::VisitCall(Call* node) {
// JavaScript example: 'with (obj) foo(1, 2, 3)' // foo is in obj
// ----------------------------------
- // Load the function
- frame_->Push(esi);
- frame_->Push(var->name());
+ // Load the function from the context. Sync the frame so we can
+ // push the arguments directly into place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+ frame_->EmitPush(esi);
+ frame_->EmitPush(Immediate(var->name()));
frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
- // eax: slot value; edx: receiver
+ // The runtime call returns a pair of values in eax and edx. The
+ // looked-up function is in eax and the receiver is in edx. These
+ // register references are not ref counted here. We spill them
+ // eagerly since they are arguments to an inevitable call (and cannot
+ // be shared with other frame elements).
+ ASSERT(!allocator()->is_used(eax));
+ frame_->EmitPush(eax);
// Load the receiver.
- frame_->Push(eax);
- frame_->Push(edx);
+ ASSERT(!allocator()->is_used(edx));
+ frame_->EmitPush(edx);
// Call the function.
CallWithArguments(args, node->position());
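Two conventions meet in this hunk. EmitPush differs from Push in that it emits a real push instruction, so the virtual frame must be fully synced first, whereas Push only grows the virtual frame. And Runtime::kLoadContextSlot returns its pair in fixed registers rather than through the frame, hence the asserts. The consumption pattern, in isolation (a sketch assuming this file's environment):

    // Consuming a two-register runtime result (sketch).
    frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
    ASSERT(!allocator()->is_used(eax));  // pair registers must be free, or
    ASSERT(!allocator()->is_used(edx));  // the result would alias a live value
    frame_->EmitPush(eax);               // looked-up function
    frame_->EmitPush(edx);               // receiver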
@@ -4218,7 +4562,8 @@ void CodeGenerator::VisitCallEval(CallEval* node) {
// Call the function.
CodeForSourcePosition(node->position());
- CallFunctionStub call_function(arg_count);
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub call_function(arg_count, in_loop);
result = frame_->CallStub(&call_function, arg_count + 1);
// Restore the context and overwrite the function on the stack with
@@ -4282,13 +4627,13 @@ void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
- JumpTarget slow_case(this);
- JumpTarget end(this);
- JumpTarget not_a_flat_string(this);
- JumpTarget a_cons_string(this);
- JumpTarget try_again_with_new_string(this, JumpTarget::BIDIRECTIONAL);
- JumpTarget ascii_string(this);
- JumpTarget got_char_code(this);
+ JumpTarget slow_case;
+ JumpTarget end;
+ JumpTarget not_a_flat_string;
+ JumpTarget a_cons_string;
+ JumpTarget try_again_with_new_string(JumpTarget::BIDIRECTIONAL);
+ JumpTarget ascii_string;
+ JumpTarget got_char_code;
Load(args->at(0));
Load(args->at(1));
@@ -4436,7 +4781,7 @@ void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
// ArgumentsAccessStub takes the parameter count as an input argument
// in register eax. Create a constant result for it.
- Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())), this);
+ Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
// Call the shared stub to get to the arguments.length.
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
Result result = frame_->CallStub(&stub, &count);
@@ -4446,7 +4791,7 @@ void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- JumpTarget leave(this);
+ JumpTarget leave;
Load(args->at(0)); // Load the object.
frame_->Dup();
Result object = frame_->Pop();
@@ -4470,7 +4815,7 @@ void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
- JumpTarget leave(this);
+ JumpTarget leave;
Load(args->at(0)); // Load the object.
Load(args->at(1)); // Load the value.
Result value = frame_->Pop();
@@ -4519,7 +4864,7 @@ void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
Load(args->at(0));
Result key = frame_->Pop();
// Explicitly create a constant result.
- Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())), this);
+ Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
// Call the shared stub to get to arguments[key].
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
Result result = frame_->CallStub(&stub, &key, &count);
@@ -4582,9 +4927,10 @@ void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
}
if (function == NULL) {
- // Call the JS runtime function. Pass 0 as the loop nesting depth
- // because we do not handle runtime calls specially in loops.
- Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET, arg_count, 0);
+ // Call the JS runtime function.
+ Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
+ arg_count,
+ loop_nesting_);
frame_->RestoreContextRegister();
frame_->SetElementAt(0, &answer);
} else {
@@ -4633,12 +4979,17 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
return;
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- // lookup the context holding the named variable
- frame_->Push(esi);
- frame_->Push(variable->name());
+ // Call the runtime to look up the context holding the named
+ // variable. Sync the virtual frame eagerly so we can push the
+ // arguments directly into place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+ frame_->EmitPush(esi);
+ frame_->EmitPush(Immediate(variable->name()));
Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
- frame_->Push(&context);
- frame_->Push(variable->name());
+ ASSERT(context.is_register());
+ frame_->EmitPush(context.reg());
+ context.Unuse();
+ frame_->EmitPush(Immediate(variable->name()));
Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
CALL_FUNCTION, 2);
frame_->Push(&answer);
@@ -4699,8 +5050,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
case Token::BIT_NOT: {
// Smi check.
- JumpTarget smi_label(this);
- JumpTarget continue_label(this);
+ JumpTarget smi_label;
+ JumpTarget continue_label;
Result operand = frame_->Pop();
operand.ToRegister();
__ test(operand.reg(), Immediate(kSmiTagMask));
@@ -4723,7 +5074,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
case Token::ADD: {
// Smi check.
- JumpTarget continue_label(this);
+ JumpTarget continue_label;
Result operand = frame_->Pop();
operand.ToRegister();
__ test(operand.reg(), Immediate(kSmiTagMask));
@@ -4745,57 +5096,89 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
}
-class DeferredCountOperation: public DeferredCode {
+// The value in dst was optimistically incremented or decremented. The
+// result overflowed or was not smi tagged. Undo the operation, call
+// into the runtime to convert the argument to a number, and call the
+// specialized add or subtract stub. The result is left in dst.
+class DeferredPrefixCountOperation: public DeferredCode {
public:
- DeferredCountOperation(CodeGenerator* generator,
- bool is_postfix,
- bool is_increment,
- int target_size)
- : DeferredCode(generator),
- is_postfix_(is_postfix),
- is_increment_(is_increment),
- target_size_(target_size) {
+ DeferredPrefixCountOperation(Register dst, bool is_increment)
+ : dst_(dst), is_increment_(is_increment) {
set_comment("[ DeferredCountOperation");
}
virtual void Generate();
private:
- bool is_postfix_;
+ Register dst_;
bool is_increment_;
- int target_size_;
};
-void DeferredCountOperation::Generate() {
- CodeGenerator* cgen = generator();
- Result value(cgen);
- enter()->Bind(&value);
- VirtualFrame* frame = cgen->frame();
+void DeferredPrefixCountOperation::Generate() {
+ // Undo the optimistic smi operation.
+ if (is_increment_) {
+ __ sub(Operand(dst_), Immediate(Smi::FromInt(1)));
+ } else {
+ __ add(Operand(dst_), Immediate(Smi::FromInt(1)));
+ }
+ __ push(dst_);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+ __ push(eax);
+ __ push(Immediate(Smi::FromInt(1)));
+ if (is_increment_) {
+ __ CallRuntime(Runtime::kNumberAdd, 2);
+ } else {
+ __ CallRuntime(Runtime::kNumberSub, 2);
+ }
+ if (!dst_.is(eax)) __ mov(dst_, eax);
+}
+
+
+// The value in dst was optimistically incremented or decremented. The
+// result overflowed or was not smi tagged. Undo the operation and call
+// into the runtime to convert the argument to a number. Update the
+// original value in old. Call the specialized add or subtract stub.
+// The result is left in dst.
+class DeferredPostfixCountOperation: public DeferredCode {
+ public:
+ DeferredPostfixCountOperation(Register dst, Register old, bool is_increment)
+ : dst_(dst), old_(old), is_increment_(is_increment) {
+ set_comment("[ DeferredCountOperation");
+ }
+
+ virtual void Generate();
+
+ private:
+ Register dst_;
+ Register old_;
+ bool is_increment_;
+};
+
+
+void DeferredPostfixCountOperation::Generate() {
// Undo the optimistic smi operation.
- value.ToRegister();
- frame->Spill(value.reg());
if (is_increment_) {
- __ sub(Operand(value.reg()), Immediate(Smi::FromInt(1)));
+ __ sub(Operand(dst_), Immediate(Smi::FromInt(1)));
} else {
- __ add(Operand(value.reg()), Immediate(Smi::FromInt(1)));
- }
- frame->Push(&value);
- value = frame->InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION, 1);
- frame->Push(&value);
- if (is_postfix_) { // Fix up copy of old value with ToNumber(value).
- // This is only safe because VisitCountOperation makes this frame slot
- // beneath the reference a register, which is spilled at the above call.
- // We cannot safely write to constants or copies below the water line.
- frame->StoreToElementAt(target_size_ + 1);
- }
- frame->Push(Smi::FromInt(1));
+ __ add(Operand(dst_), Immediate(Smi::FromInt(1)));
+ }
+ __ push(dst_);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+
+ // Save the result of ToNumber to use as the old value.
+ __ push(eax);
+
+ // Call the runtime for the addition or subtraction.
+ __ push(eax);
+ __ push(Immediate(Smi::FromInt(1)));
if (is_increment_) {
- value = frame->CallRuntime(Runtime::kNumberAdd, 2);
+ __ CallRuntime(Runtime::kNumberAdd, 2);
} else {
- value = frame->CallRuntime(Runtime::kNumberSub, 2);
+ __ CallRuntime(Runtime::kNumberSub, 2);
}
- exit_.Jump(&value);
+ if (!dst_.is(eax)) __ mov(dst_, eax);
+ __ pop(old_);
}
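The postfix variant needs the ToNumber result twice: once preserved as the expression's value and once as an argument to the runtime add or subtract. It manages this purely with stack discipline. Annotated stack picture for the increment case (a sketch; brackets show the stack, top first):

    // Stack evolution in DeferredPostfixCountOperation::Generate (sketch).
    __ sub(Operand(dst_), Immediate(Smi::FromInt(1)));  // undo optimistic add
    __ push(dst_);                                   // [ operand ]
    __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
    __ push(eax);                                    // [ num ]  (saved old)
    __ push(eax);                                    // [ num, num ]
    __ push(Immediate(Smi::FromInt(1)));             // [ 1, num, num ]
    __ CallRuntime(Runtime::kNumberAdd, 2);          // [ num ], result in eax
    if (!dst_.is(eax)) __ mov(dst_, eax);            // new value to dst_
    __ pop(old_);                                    // old value recovered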
@@ -4808,96 +5191,93 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
Variable* var = node->expression()->AsVariableProxy()->AsVariable();
bool is_const = (var != NULL && var->mode() == Variable::CONST);
- // Postfix operators need a stack slot under the reference to hold
- // the old value while the new one is being stored.
- if (is_postfix) {
- frame_->Push(Smi::FromInt(0));
- }
+ // Postfix operations need a stack slot under the reference to hold
+ // the old value while the new value is being stored. This is so that
+ // in the case that storing the new value requires a call, the old
+ // value is safely in the frame where it can be spilled.
+ if (is_postfix) frame_->Push(Smi::FromInt(0));
{ Reference target(this, node->expression());
if (target.is_illegal()) {
// Spoof the virtual frame to have the expected height (one higher
// than on entry).
- if (!is_postfix) {
- frame_->Push(Smi::FromInt(0));
- }
+ if (!is_postfix) frame_->Push(Smi::FromInt(0));
return;
}
target.TakeValue(NOT_INSIDE_TYPEOF);
- DeferredCountOperation* deferred =
- new DeferredCountOperation(this, is_postfix,
- is_increment, target.size());
+ Result new_value = frame_->Pop();
+ new_value.ToRegister();
- Result value = frame_->Pop();
- value.ToRegister();
-
- // Postfix: Store the old value as the result.
+ Result old_value; // Only allocated in the postfix case.
if (is_postfix) {
- // Explicitly back the slot for the old value with a new register.
- // This improves performance in some cases.
- Result old_value = allocator_->Allocate();
+ // Allocate a temporary to preserve the old value.
+ old_value = allocator_->Allocate();
ASSERT(old_value.is_valid());
- __ mov(old_value.reg(), value.reg());
- // SetElement must not create a constant element or a copy in this slot,
- // since we will write to it, below the waterline, in deferred code.
- frame_->SetElementAt(target.size(), &old_value);
+ __ mov(old_value.reg(), new_value.reg());
}
+ // Ensure the new value is writable.
+ frame_->Spill(new_value.reg());
- // Perform optimistic increment/decrement. Ensure the value is
- // writable.
- frame_->Spill(value.reg());
- ASSERT(allocator_->count(value.reg()) == 1);
-
- // In order to combine the overflow and the smi check, we need to
- // be able to allocate a byte register. We attempt to do so
- // without spilling. If we fail, we will generate separate
- // overflow and smi checks.
+ // In order to combine the overflow and the smi tag check, we need
+ // to be able to allocate a byte register. We attempt to do so
+ // without spilling. If we fail, we will generate separate overflow
+ // and smi tag checks.
//
- // We need to allocate and clear the temporary byte register
- // before performing the count operation since clearing the
- // register using xor will clear the overflow flag.
+ // We allocate and clear the temporary byte register before
+ // performing the count operation since clearing the register using
+ // xor will clear the overflow flag.
Result tmp = allocator_->AllocateByteRegisterWithoutSpilling();
if (tmp.is_valid()) {
__ Set(tmp.reg(), Immediate(0));
}
+ DeferredCode* deferred = NULL;
+ if (is_postfix) {
+ deferred = new DeferredPostfixCountOperation(new_value.reg(),
+ old_value.reg(),
+ is_increment);
+ } else {
+ deferred = new DeferredPrefixCountOperation(new_value.reg(),
+ is_increment);
+ }
+
if (is_increment) {
- __ add(Operand(value.reg()), Immediate(Smi::FromInt(1)));
+ __ add(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
} else {
- __ sub(Operand(value.reg()), Immediate(Smi::FromInt(1)));
+ __ sub(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
}
- // If the count operation didn't overflow and the result is a
- // valid smi, we're done. Otherwise, we jump to the deferred
- // slow-case code.
- //
- // We combine the overflow and the smi check if we could
- // successfully allocate a temporary byte register.
+ // If the count operation didn't overflow and the result is a valid
+ // smi, we're done. Otherwise, we jump to the deferred slow-case
+ // code.
if (tmp.is_valid()) {
+ // We combine the overflow and the smi tag check if we could
+ // successfully allocate a temporary byte register.
__ setcc(overflow, tmp.reg());
- __ or_(Operand(value.reg()), tmp.reg());
+ __ or_(Operand(tmp.reg()), new_value.reg());
+ __ test(tmp.reg(), Immediate(kSmiTagMask));
tmp.Unuse();
- __ test(value.reg(), Immediate(kSmiTagMask));
- deferred->enter()->Branch(not_zero, &value, not_taken);
- } else { // Otherwise we test separately for overflow and smi check.
- deferred->enter()->Branch(overflow, &value, not_taken);
- __ test(value.reg(), Immediate(kSmiTagMask));
- deferred->enter()->Branch(not_zero, &value, not_taken);
+ deferred->Branch(not_zero);
+ } else {
+ // Otherwise we test separately for overflow and smi tag.
+ deferred->Branch(overflow);
+ __ test(new_value.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
}
+ deferred->BindExit();
- // Store the new value in the target if not const.
- deferred->BindExit(&value);
- frame_->Push(&value);
- if (!is_const) {
- target.SetValue(NOT_CONST_INIT);
- }
- }
+ // Postfix: store the old value in the allocated slot under the
+ // reference.
+ if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
- // Postfix: Discard the new value and use the old.
- if (is_postfix) {
- frame_->Drop();
+ frame_->Push(&new_value);
+ // Non-constant: update the reference.
+ if (!is_const) target.SetValue(NOT_CONST_INIT);
}
+
+ // Postfix: drop the new value and use the old.
+ if (is_postfix) frame_->Drop();
}
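The byte-register trick above collapses two branches into one: setcc(overflow) writes 1 to tmp iff the optimistic add or sub overflowed, and or-ing the new value into tmp (into tmp, not into the value, which the removed code modified) folds the smi tag bit into the same byte, so a single test catches both failure modes. The scratch must be cleared before the arithmetic because the xor used for clearing destroys the flags. In isolation (sketch, assuming kSmiTag == 0 as on ia32):

    // One branch for overflow plus smi-tag check (sketch).
    __ Set(tmp.reg(), Immediate(0));       // clear first: xor ruins the flags
    __ add(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
    __ setcc(overflow, tmp.reg());         // tmp = 1 iff signed overflow
    __ or_(Operand(tmp.reg()), new_value.reg());   // fold in the tag bit
    __ test(tmp.reg(), Immediate(kSmiTagMask));
    deferred->Branch(not_zero);            // overflow or non-smi result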
@@ -4918,7 +5298,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
// is necessary because we assume that if we get control flow on the
// last path out of an expression we got it on all paths.
if (op == Token::AND) {
- JumpTarget is_true(this);
+ JumpTarget is_true;
ControlDestination dest(&is_true, destination()->false_target(), true);
LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
@@ -4956,8 +5336,8 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
// We have a materialized value on the frame, so we exit with
// one on all paths. There are possibly also jumps to is_true
// from nested subexpressions.
- JumpTarget pop_and_continue(this);
- JumpTarget exit(this);
+ JumpTarget pop_and_continue;
+ JumpTarget exit;
// Avoid popping the result if it converts to 'false' using the
// standard ToBoolean() conversion as described in ECMA-262,
@@ -4981,7 +5361,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
}
} else if (op == Token::OR) {
- JumpTarget is_false(this);
+ JumpTarget is_false;
ControlDestination dest(destination()->true_target(), &is_false, false);
LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
@@ -5018,8 +5398,8 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
// We have a materialized value on the frame, so we exit with
// one on all paths. There are possibly also jumps to is_false
// from nested subexpressions.
- JumpTarget pop_and_continue(this);
- JumpTarget exit(this);
+ JumpTarget pop_and_continue;
+ JumpTarget exit;
// Avoid popping the result if it converts to 'true' using the
// standard ToBoolean() conversion as described in ECMA-262,
@@ -5248,10 +5628,14 @@ bool CodeGenerator::HasValidEntryRegisters() {
#endif
+// Emit a LoadIC call to get the value from receiver and leave it in
+// dst. The receiver register is restored after the call.
class DeferredReferenceGetNamedValue: public DeferredCode {
public:
- DeferredReferenceGetNamedValue(CodeGenerator* cgen, Handle<String> name)
- : DeferredCode(cgen), name_(name) {
+ DeferredReferenceGetNamedValue(Register dst,
+ Register receiver,
+ Handle<String> name)
+ : dst_(dst), receiver_(receiver), name_(name) {
set_comment("[ DeferredReferenceGetNamedValue");
}
@@ -5261,39 +5645,41 @@ class DeferredReferenceGetNamedValue: public DeferredCode {
private:
Label patch_site_;
+ Register dst_;
+ Register receiver_;
Handle<String> name_;
};
void DeferredReferenceGetNamedValue::Generate() {
- CodeGenerator* cgen = generator();
- Result receiver(cgen);
- enter()->Bind(&receiver);
-
- cgen->frame()->Push(&receiver);
- cgen->frame()->Push(name_);
- Result answer = cgen->frame()->CallLoadIC(RelocInfo::CODE_TARGET);
+ __ push(receiver_);
+ __ Set(ecx, Immediate(name_));
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
// The call must be followed by a test eax instruction to indicate
// that the inobject property case was inlined.
- ASSERT(answer.is_register() && answer.reg().is(eax));
- // Store the delta to the map check instruction here in the test instruction.
- // Use masm_-> instead of the double underscore macro since the latter can't
- // return a value.
+ //
+ // Store the delta to the map check instruction here in the test
+ // instruction. Use masm_-> instead of the __ macro since the
+ // latter can't return a value.
int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
- // Here we use masm_-> instead of the double underscore macro because
- // this is the instruction that gets patched and coverage code gets in
- // the way.
- masm_->test(answer.reg(), Immediate(-delta_to_patch_site));
+ // Here we use masm_-> instead of the __ macro because this is the
+ // instruction that gets patched and coverage code gets in the way.
+ masm_->test(eax, Immediate(-delta_to_patch_site));
__ IncrementCounter(&Counters::named_load_inline_miss, 1);
- receiver = cgen->frame()->Pop();
- exit_.Jump(&receiver, &answer);
+
+ if (!dst_.is(eax)) __ mov(dst_, eax);
+ __ pop(receiver_);
}
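The test instruction emitted after the IC call is not a real comparison: its 32-bit immediate encodes the distance back to the inlined map check, so the LoadIC miss handler can locate and patch that site. The convention, in isolation (a sketch assuming this file's environment):

    // Encoding the patch-site offset in a dummy 'test eax, imm' (sketch).
    Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
    __ call(ic, RelocInfo::CODE_TARGET);
    // Distance from the patchable map-compare instruction to here.
    int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
    // The IC treats a following 'test eax' as the inlined-load marker and
    // recovers the (negated) delta from the immediate operand.
    masm_->test(eax, Immediate(-delta_to_patch_site));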
class DeferredReferenceGetKeyedValue: public DeferredCode {
public:
- DeferredReferenceGetKeyedValue(CodeGenerator* generator, bool is_global)
- : DeferredCode(generator), is_global_(is_global) {
+ explicit DeferredReferenceGetKeyedValue(Register dst,
+ Register receiver,
+ Register key,
+ bool is_global)
+ : dst_(dst), receiver_(receiver), key_(key), is_global_(is_global) {
set_comment("[ DeferredReferenceGetKeyedValue");
}
@@ -5303,17 +5689,16 @@ class DeferredReferenceGetKeyedValue: public DeferredCode {
private:
Label patch_site_;
+ Register dst_;
+ Register receiver_;
+ Register key_;
bool is_global_;
};
void DeferredReferenceGetKeyedValue::Generate() {
- CodeGenerator* cgen = generator();
- Result receiver(cgen);
- Result key(cgen);
- enter()->Bind(&receiver, &key);
- cgen->frame()->Push(&receiver); // First IC argument.
- cgen->frame()->Push(&key); // Second IC argument.
+ __ push(receiver_); // First IC argument.
+ __ push(key_); // Second IC argument.
// Calculate the delta from the IC call instruction to the map check
// cmp instruction in the inlined version. This delta is stored in
@@ -5321,32 +5706,25 @@ void DeferredReferenceGetKeyedValue::Generate() {
// it in the IC initialization code and patch the cmp instruction.
// This means that we cannot allow test instructions after calls to
// KeyedLoadIC stubs in other places.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
RelocInfo::Mode mode = is_global_
? RelocInfo::CODE_TARGET_CONTEXT
: RelocInfo::CODE_TARGET;
- Result value = cgen->frame()->CallKeyedLoadIC(mode);
- // The result needs to be specifically the eax register because the
- // offset to the patch site will be expected in a test eax
- // instruction.
- ASSERT(value.is_register() && value.reg().is(eax));
+ __ call(ic, mode);
// The delta from the start of the map-compare instruction to the
- // test instruction. We use masm_ directly here instead of the
- // double underscore macro because the macro sometimes uses macro
- // expansion to turn into something that can't return a value. This
- // is encountered when doing generated code coverage tests.
+ // test instruction. We use masm_-> directly here instead of the __
+ // macro because the macro sometimes uses macro expansion to turn
+ // into something that can't return a value. This is encountered
+ // when doing generated code coverage tests.
int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
- // Here we use masm_-> instead of the double underscore macro because this
- // is the instruction that gets patched and coverage code gets in the way.
- masm_->test(value.reg(), Immediate(-delta_to_patch_site));
+ // Here we use masm_-> instead of the __ macro because this is the
+ // instruction that gets patched and coverage code gets in the way.
+ masm_->test(eax, Immediate(-delta_to_patch_site));
__ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
- // The receiver and key were spilled by the call, so their state as
- // constants or copies has been changed. Thus, they need to be
- // "mergable" in the block at the exit label and are therefore
- // passed as return results here.
- key = cgen->frame()->Pop();
- receiver = cgen->frame()->Pop();
- exit_.Jump(&receiver, &key, &value);
+ if (!dst_.is(eax)) __ mov(dst_, eax);
+ __ pop(key_);
+ __ pop(receiver_);
}
@@ -5395,9 +5773,13 @@ void Reference::GetValue(TypeofState typeof_state) {
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
- if (is_global || cgen_->scope()->is_global_scope()) {
- // Do not inline the inobject property case for loads from the
- // global object or loads in toplevel code.
+ // Do not inline the inobject property case for loads from the global
+ // object. Also do not inline for unoptimized code. This saves time
+ // in the code generator. Unoptimized code is toplevel code or code
+ // that is not in a loop.
+ if (is_global ||
+ cgen_->scope()->is_global_scope() ||
+ cgen_->loop_nesting() == 0) {
Comment cmnt(masm, "[ Load from named Property");
cgen_->frame()->Push(GetName());
@@ -5413,19 +5795,20 @@ void Reference::GetValue(TypeofState typeof_state) {
} else {
// Inline the inobject property case.
Comment cmnt(masm, "[ Inlined named property load");
- DeferredReferenceGetNamedValue* deferred =
- new DeferredReferenceGetNamedValue(cgen_, GetName());
Result receiver = cgen_->frame()->Pop();
receiver.ToRegister();
- // Check that the receiver is a heap object.
- __ test(receiver.reg(), Immediate(kSmiTagMask));
- deferred->enter()->Branch(zero, &receiver, not_taken);
- // Preallocate the value register to ensure that there is no
- // spill emitted between the patch site label and the offset in
- // the load instruction.
Result value = cgen_->allocator()->Allocate();
ASSERT(value.is_valid());
+ DeferredReferenceGetNamedValue* deferred =
+ new DeferredReferenceGetNamedValue(value.reg(),
+ receiver.reg(),
+ GetName());
+
+ // Check that the receiver is a heap object.
+ __ test(receiver.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(zero);
+
__ bind(deferred->patch_site());
// This is the map check instruction that will be patched (so we can't
// use the double underscore macro that may insert instructions).
@@ -5434,7 +5817,7 @@ void Reference::GetValue(TypeofState typeof_state) {
Immediate(Factory::null_value()));
// This branch is always a forwards branch so it's always a fixed
// size which allows the assert below to succeed and patching to work.
- deferred->enter()->Branch(not_equal, &receiver, not_taken);
+ deferred->Branch(not_equal);
// The delta from the patch label to the load offset must be
// statically known.
@@ -5447,7 +5830,7 @@ void Reference::GetValue(TypeofState typeof_state) {
masm->mov(value.reg(), FieldOperand(receiver.reg(), offset));
__ IncrementCounter(&Counters::named_load_inline, 1);
- deferred->BindExit(&receiver, &value);
+ deferred->BindExit();
cgen_->frame()->Push(&receiver);
cgen_->frame()->Push(&value);
}
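The inlined fast path is emitted with deliberately wrong operands: the map compare uses null and the load uses a dummy field offset, and both immediates are patched by the LoadIC on the first miss. Once patched, monomorphic loads never leave generated code. The emitted skeleton (a sketch; 'offset' is the dummy initial offset computed in context not shown by the diff):

    // Inlined named load as first emitted (sketch; immediates get patched).
    __ test(receiver.reg(), Immediate(kSmiTagMask));
    deferred->Branch(zero);                        // smis have no map
    masm->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
              Immediate(Factory::null_value()));   // patched: real map
    deferred->Branch(not_equal);                   // fixed-size forward branch
    masm->mov(value.reg(),
              FieldOperand(receiver.reg(), offset));  // patched: real offset
    deferred->BindExit();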
@@ -5467,20 +5850,34 @@ void Reference::GetValue(TypeofState typeof_state) {
// patch the map check if appropriate.
if (cgen_->loop_nesting() > 0) {
Comment cmnt(masm, "[ Inlined array index load");
- DeferredReferenceGetKeyedValue* deferred =
- new DeferredReferenceGetKeyedValue(cgen_, is_global);
Result key = cgen_->frame()->Pop();
Result receiver = cgen_->frame()->Pop();
key.ToRegister();
receiver.ToRegister();
+ // Use a fresh temporary to load the elements without destroying
+ // the receiver, which is needed for the deferred slow case.
+ Result elements = cgen_->allocator()->Allocate();
+ ASSERT(elements.is_valid());
+
+ // Use a fresh temporary for the index and later the loaded
+ // value.
+ Result index = cgen_->allocator()->Allocate();
+ ASSERT(index.is_valid());
+
+ DeferredReferenceGetKeyedValue* deferred =
+ new DeferredReferenceGetKeyedValue(index.reg(),
+ receiver.reg(),
+ key.reg(),
+ is_global);
+
// Check that the receiver is not a smi (only needed if this
// is not a load from the global context) and that it has the
// expected map.
if (!is_global) {
__ test(receiver.reg(), Immediate(kSmiTagMask));
- deferred->enter()->Branch(zero, &receiver, &key, not_taken);
+ deferred->Branch(zero);
}
// Initially, use an invalid map. The map is patched in the IC
@@ -5489,32 +5886,28 @@ void Reference::GetValue(TypeofState typeof_state) {
// Use masm-> here instead of the double underscore macro since extra
// coverage code can interfere with the patching.
masm->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- Immediate(Factory::null_value()));
- deferred->enter()->Branch(not_equal, &receiver, &key, not_taken);
+ Immediate(Factory::null_value()));
+ deferred->Branch(not_equal);
// Check that the key is a smi.
__ test(key.reg(), Immediate(kSmiTagMask));
- deferred->enter()->Branch(not_zero, &receiver, &key, not_taken);
+ deferred->Branch(not_zero);
// Get the elements array from the receiver and check that it
// is not a dictionary.
- Result elements = cgen_->allocator()->Allocate();
- ASSERT(elements.is_valid());
__ mov(elements.reg(),
FieldOperand(receiver.reg(), JSObject::kElementsOffset));
__ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
Immediate(Factory::hash_table_map()));
- deferred->enter()->Branch(equal, &receiver, &key, not_taken);
+ deferred->Branch(equal);
// Shift the key to get the actual index value and check that
// it is within bounds.
- Result index = cgen_->allocator()->Allocate();
- ASSERT(index.is_valid());
__ mov(index.reg(), key.reg());
__ sar(index.reg(), kSmiTagSize);
__ cmp(index.reg(),
FieldOperand(elements.reg(), Array::kLengthOffset));
- deferred->enter()->Branch(above_equal, &receiver, &key, not_taken);
+ deferred->Branch(above_equal);
// Load and check that the result is not the hole. We could
// reuse the index or elements register for the value.
@@ -5531,12 +5924,12 @@ void Reference::GetValue(TypeofState typeof_state) {
elements.Unuse();
index.Unuse();
__ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
- deferred->enter()->Branch(equal, &receiver, &key, not_taken);
+ deferred->Branch(equal);
__ IncrementCounter(&Counters::keyed_load_inline, 1);
+ deferred->BindExit();
// Restore the receiver and key to the frame and push the
// result on top of it.
- deferred->BindExit(&receiver, &key, &value);
cgen_->frame()->Push(&receiver);
cgen_->frame()->Push(&key);
cgen_->frame()->Push(&value);
@@ -5684,340 +6077,6 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
}
-#undef __
-#define __ ACCESS_MASM(masm_)
-
-Result DeferredInlineBinaryOperation::GenerateInlineCode(Result* left,
- Result* right) {
- // Perform fast-case smi code for the operation (left <op> right) and
- // returns the result in a Result.
- // If any fast-case tests fail, it jumps to the slow-case deferred code,
- // which calls the binary operation stub, with the arguments (in registers)
- // on top of the frame.
- // Consumes its arguments (sets left and right to invalid and frees their
- // registers).
-
- left->ToRegister();
- right->ToRegister();
- // A newly allocated register answer is used to hold the answer.
- // The registers containing left and right are not modified in
- // most cases, so they usually don't need to be spilled in the fast case.
- Result answer = generator()->allocator()->Allocate();
-
- ASSERT(answer.is_valid());
- // Perform the smi check.
- if (left->reg().is(right->reg())) {
- __ test(left->reg(), Immediate(kSmiTagMask));
- } else {
- __ mov(answer.reg(), left->reg());
- __ or_(answer.reg(), Operand(right->reg()));
- ASSERT(kSmiTag == 0); // adjust zero check if not the case
- __ test(answer.reg(), Immediate(kSmiTagMask));
- }
- enter()->Branch(not_zero, left, right, not_taken);
-
- // All operations start by copying the left argument into answer.
- __ mov(answer.reg(), left->reg());
- switch (op_) {
- case Token::ADD:
- __ add(answer.reg(), Operand(right->reg())); // add optimistically
- enter()->Branch(overflow, left, right, not_taken);
- break;
-
- case Token::SUB:
- __ sub(answer.reg(), Operand(right->reg())); // subtract optimistically
- enter()->Branch(overflow, left, right, not_taken);
- break;
-
- case Token::MUL: {
- // If the smi tag is 0 we can just leave the tag on one operand.
- ASSERT(kSmiTag == 0); // adjust code below if not the case
- // Remove tag from the left operand (but keep sign).
- // Left hand operand has been copied into answer.
- __ sar(answer.reg(), kSmiTagSize);
- // Do multiplication of smis, leaving result in answer.
- __ imul(answer.reg(), Operand(right->reg()));
- // Go slow on overflows.
- enter()->Branch(overflow, left, right, not_taken);
- // Check for negative zero result. If product is zero,
- // and one argument is negative, go to slow case.
- // The frame is unchanged in this block, so local control flow can
- // use a Label rather than a JumpTarget.
- Label non_zero_result;
- __ test(answer.reg(), Operand(answer.reg()));
- __ j(not_zero, &non_zero_result, taken);
- __ mov(answer.reg(), left->reg());
- __ or_(answer.reg(), Operand(right->reg()));
- enter()->Branch(negative, left, right, not_taken);
- __ xor_(answer.reg(), Operand(answer.reg())); // Positive 0 is correct.
- __ bind(&non_zero_result);
- break;
- }
-
- case Token::DIV: // Fall through.
- case Token::MOD: {
- // Div and mod use the registers eax and edx. Left and right must
- // be preserved, because the original operands are needed if we switch
- // to the slow case. Move them if either is in eax or edx.
- // The Result answer should be changed into an alias for eax.
- // Precondition:
- // The Results left and right are valid. They may be the same register,
- // and may be unspilled. The Result answer is valid and is distinct
- // from left and right, and is spilled.
- // The value in left is copied to answer.
-
- Result reg_eax = generator()->allocator()->Allocate(eax);
- Result reg_edx = generator()->allocator()->Allocate(edx);
- // These allocations may have failed, if one of left, right, or answer
- // is in register eax or edx.
- bool left_copied_to_eax = false; // We will make sure this becomes true.
-
- // Part 1: Get eax
- if (answer.reg().is(eax)) {
- reg_eax = answer;
- left_copied_to_eax = true;
- } else if (right->reg().is(eax) || left->reg().is(eax)) {
- // We need a non-edx register to move one or both of left and right to.
- // We use answer if it is not edx, otherwise we allocate one.
- if (answer.reg().is(edx)) {
- reg_edx = answer;
- answer = generator()->allocator()->Allocate();
- ASSERT(answer.is_valid());
- }
-
- if (left->reg().is(eax)) {
- reg_eax = *left;
- left_copied_to_eax = true;
- *left = answer;
- }
- if (right->reg().is(eax)) {
- reg_eax = *right;
- *right = answer;
- }
- __ mov(answer.reg(), eax);
- }
- // End of Part 1.
- // reg_eax is valid, and neither left nor right is in eax.
- ASSERT(reg_eax.is_valid());
- ASSERT(!left->reg().is(eax));
- ASSERT(!right->reg().is(eax));
-
- // Part 2: Get edx
- // reg_edx is invalid if and only if either left, right,
- // or answer is in edx. If edx is valid, then either edx
- // was free, or it was answer, but answer was reallocated.
- if (answer.reg().is(edx)) {
- reg_edx = answer;
- } else if (right->reg().is(edx) || left->reg().is(edx)) {
- // Is answer used?
- if (answer.reg().is(eax) || answer.reg().is(left->reg()) ||
- answer.reg().is(right->reg())) {
- answer = generator()->allocator()->Allocate();
- ASSERT(answer.is_valid()); // We cannot hit both Allocate() calls.
- }
- if (left->reg().is(edx)) {
- reg_edx = *left;
- *left = answer;
- }
- if (right->reg().is(edx)) {
- reg_edx = *right;
- *right = answer;
- }
- __ mov(answer.reg(), edx);
- }
- // End of Part 2
- ASSERT(reg_edx.is_valid());
- ASSERT(!left->reg().is(eax));
- ASSERT(!right->reg().is(eax));
-
- answer = reg_eax; // May free answer, if it was never used.
- generator()->frame()->Spill(eax);
- if (!left_copied_to_eax) {
- __ mov(eax, left->reg());
- left_copied_to_eax = true;
- }
- generator()->frame()->Spill(edx);
-
- // Postcondition:
- // reg_eax, reg_edx are valid, correct, and spilled.
- // reg_eax contains the value originally in left
- // left and right are not eax or edx. They may or may not be
- // spilled or distinct.
- // answer is an alias for reg_eax.
-
- // Sign extend eax into edx:eax.
- __ cdq();
- // Check for 0 divisor.
- __ test(right->reg(), Operand(right->reg()));
- enter()->Branch(zero, left, right, not_taken);
- // Divide edx:eax by the right operand.
- __ idiv(right->reg());
- if (op_ == Token::DIV) {
- // Check for negative zero result. If result is zero, and divisor
- // is negative, return a floating point negative zero.
- // The frame is unchanged in this block, so local control flow can
- // use a Label rather than a JumpTarget.
- Label non_zero_result;
- __ test(left->reg(), Operand(left->reg()));
- __ j(not_zero, &non_zero_result, taken);
- __ test(right->reg(), Operand(right->reg()));
- enter()->Branch(negative, left, right, not_taken);
- __ bind(&non_zero_result);
- // Check for the corner case of dividing the most negative smi
- // by -1. We cannot use the overflow flag, since it is not set
- // by idiv instruction.
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ cmp(eax, 0x40000000);
- enter()->Branch(equal, left, right, not_taken);
- // Check that the remainder is zero.
- __ test(edx, Operand(edx));
- enter()->Branch(not_zero, left, right, not_taken);
- // Tag the result and store it in register temp.
- ASSERT(kSmiTagSize == times_2); // adjust code if not the case
- __ lea(answer.reg(), Operand(eax, eax, times_1, kSmiTag));
- } else {
- ASSERT(op_ == Token::MOD);
- // Check for a negative zero result. If the result is zero, and the
- // dividend is negative, return a floating point negative zero.
- // The frame is unchanged in this block, so local control flow can
- // use a Label rather than a JumpTarget.
- Label non_zero_result;
- __ test(edx, Operand(edx));
- __ j(not_zero, &non_zero_result, taken);
- __ test(left->reg(), Operand(left->reg()));
- enter()->Branch(negative, left, right, not_taken);
- __ bind(&non_zero_result);
- // The answer is in edx.
- answer = reg_edx;
- }
- break;
- }
- case Token::BIT_OR:
- __ or_(answer.reg(), Operand(right->reg()));
- break;
-
- case Token::BIT_AND:
- __ and_(answer.reg(), Operand(right->reg()));
- break;
-
- case Token::BIT_XOR:
- __ xor_(answer.reg(), Operand(right->reg()));
- break;
-
- case Token::SHL:
- case Token::SHR:
- case Token::SAR:
- // Move right into ecx.
- // Left is in two registers already, so even if left or answer is ecx,
- // we can move right to it, and use the other one.
- // Right operand must be in register cl because x86 likes it that way.
- if (right->reg().is(ecx)) {
- // Right is already in the right place. Left may be in the
- // same register, which causes problems. Always use answer
- // instead of left, even if left is not ecx, since this avoids
- // spilling left.
- *left = answer;
- } else if (left->reg().is(ecx)) {
- generator()->frame()->Spill(left->reg());
- __ mov(left->reg(), right->reg());
- *right = *left;
- *left = answer; // Use copy of left in answer as left.
- } else if (answer.reg().is(ecx)) {
- __ mov(answer.reg(), right->reg());
- *right = answer;
- } else {
- Result reg_ecx = generator()->allocator()->Allocate(ecx);
- ASSERT(reg_ecx.is_valid());
- __ mov(ecx, right->reg());
- *right = reg_ecx;
- // Answer and left both contain the left operand. Use answer, so
- // left is not spilled.
- *left = answer;
- }
- ASSERT(left->reg().is_valid());
- ASSERT(!left->reg().is(ecx));
- ASSERT(right->reg().is(ecx));
- answer.Unuse(); // Answer may now be being used for left or right.
- // We will modify left and right, which we do not do in any other
- // binary operation. The exits to slow code need to restore the
- // original values of left and right, or at least values that give
- // the same answer.
-
- // We are modifying left and right. They must be spilled!
- generator()->frame()->Spill(left->reg());
- generator()->frame()->Spill(right->reg());
-
- // Remove tags from operands (but keep sign).
- __ sar(left->reg(), kSmiTagSize);
- __ sar(ecx, kSmiTagSize);
- // Perform the operation.
- switch (op_) {
- case Token::SAR:
- __ sar(left->reg());
- // No checks of result necessary
- break;
- case Token::SHR: {
- __ shr(left->reg());
- // Check that the *unsigned* result fits in a smi.
- // Neither of the two high-order bits can be set:
- // - 0x80000000: high bit would be lost when smi tagging.
- // - 0x40000000: this number would convert to negative when
- // smi tagging.
- // These two cases can only happen with shifts by 0 or 1 when
- // handed a valid smi.
- // If the answer cannot be represented by a SMI, restore
- // the left and right arguments, and jump to slow case.
- // The low bit of the left argument may be lost, but only
- // in a case where it is dropped anyway.
- JumpTarget result_ok(generator());
- __ test(left->reg(), Immediate(0xc0000000));
- result_ok.Branch(zero, left, taken);
- __ shl(left->reg());
- ASSERT(kSmiTag == 0);
- __ shl(left->reg(), kSmiTagSize);
- __ shl(right->reg(), kSmiTagSize);
- enter()->Jump(left, right);
- result_ok.Bind(left);
- break;
- }
- case Token::SHL: {
- __ shl(left->reg());
- // Check that the *signed* result fits in a smi.
- JumpTarget result_ok(generator());
- __ cmp(left->reg(), 0xc0000000);
- result_ok.Branch(positive, left, taken);
-
- __ shr(left->reg());
- ASSERT(kSmiTag == 0);
- __ shl(left->reg(), kSmiTagSize);
- __ shl(right->reg(), kSmiTagSize);
- enter()->Jump(left, right);
- result_ok.Bind(left);
- break;
- }
- default:
- UNREACHABLE();
- }
- // Smi-tag the result, in left, and make answer an alias for left->
- answer = *left;
- answer.ToRegister();
- ASSERT(kSmiTagSize == times_2); // adjust code if not the case
- __ lea(answer.reg(),
- Operand(answer.reg(), answer.reg(), times_1, kSmiTag));
- break;
-
- default:
- UNREACHABLE();
- break;
- }
- left->Unuse();
- right->Unuse();
- return answer;
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
// Perform fast-case smi code for the operation (eax <op> ebx) and
// leave result in register eax.
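GenerateSmiCode opens the same way the deleted GenerateInlineCode did: because kSmiTag is 0, or-ing the two operands accumulates their tag bits, so a single test proves both are smis. Sketch, with the operands in eax and ebx as the comment above states:

    // Checking two operands for smi-ness with one branch (sketch).
    __ mov(ecx, Operand(eax));
    __ or_(ecx, Operand(ebx));         // tag bits accumulate in ecx
    __ test(ecx, Immediate(kSmiTagMask));
    __ j(not_zero, slow, not_taken);   // either operand boxed: bail out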
@@ -7101,6 +7160,9 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
Label invoke, exit;
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ Label not_outermost_js, not_outermost_js_2;
+#endif
// Setup frame.
__ push(ebp);
@@ -7119,6 +7181,15 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
__ push(Operand::StaticVariable(c_entry_fp));
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // If this is the outermost JS call, set js_entry_sp value.
+ ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
+ __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
+ __ j(NegateCondition(equal), &not_outermost_js);
+ __ mov(Operand::StaticVariable(js_entry_sp), ebp);
+ __ bind(&not_outermost_js);
+#endif
+
// Call a faked try-block that does the invoke.
__ call(&invoke);
@@ -7162,6 +7233,15 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Pop next_sp.
__ add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // If the current EBP value is the same as js_entry_sp, the current
+ // frame is the outermost JS entry frame.
+ __ cmp(ebp, Operand::StaticVariable(js_entry_sp));
+ __ j(NegateCondition(equal), &not_outermost_js_2);
+ __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
+ __ bind(&not_outermost_js_2);
+#endif
+
// Restore the top frame descriptor from the stack.
__ bind(&exit);
__ pop(Operand::StaticVariable(ExternalReference(Top::k_c_entry_fp_address)));
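The two new profiling blocks bracket JS entry symmetrically: js_entry_sp is set to the frame pointer only on the outermost entry into JavaScript and cleared only when that same frame unwinds, so nested VM re-entries leave it untouched and the profiler can find the base of the JS stack. The same bookkeeping in plain C++ (an analogy with hypothetical helpers, not code from this patch):

    #include <cstddef>

    // Analogy of the js_entry_sp bookkeeping added above.
    static void* js_entry_sp = NULL;

    void OnJSEntry(void* ebp) {
      if (js_entry_sp == NULL) js_entry_sp = ebp;   // outermost entry only
    }

    void OnJSExit(void* ebp) {
      if (js_entry_sp == ebp) js_entry_sp = NULL;   // leaving outermost frame
    }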
@@ -7186,7 +7266,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ j(zero, &slow, not_taken);
// Check that the left hand is a JS object.
- __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset)); // ebx - object map
+ __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset)); // eax - object map
__ movzx_b(ecx, FieldOperand(eax, Map::kInstanceTypeOffset)); // ecx - type
__ cmp(ecx, FIRST_JS_OBJECT_TYPE);
__ j(less, &slow, not_taken);
@@ -7198,6 +7278,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ TryGetFunctionPrototype(edx, ebx, ecx, &slow);
// Check that the function prototype is a JS object.
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ j(zero, &slow, not_taken);
__ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
__ cmp(ecx, FIRST_JS_OBJECT_TYPE);
diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h
index 0e019570e..9b609a156 100644
--- a/deps/v8/src/ia32/codegen-ia32.h
+++ b/deps/v8/src/ia32/codegen-ia32.h
@@ -28,7 +28,8 @@
#ifndef V8_IA32_CODEGEN_IA32_H_
#define V8_IA32_CODEGEN_IA32_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Forward declarations
class DeferredCode;
@@ -332,8 +333,7 @@ class CodeGenerator: public AstVisitor {
// Accessors
Scope* scope() const { return scope_; }
- // Clearing and generating deferred code.
- void ClearDeferred();
+ // Generating deferred code.
void ProcessDeferred();
bool is_eval() { return is_eval_; }
@@ -347,7 +347,6 @@ class CodeGenerator: public AstVisitor {
void IncrementLoopNesting() { loop_nesting_++; }
void DecrementLoopNesting() { loop_nesting_--; }
-
// Node visitors.
void VisitStatements(ZoneList<Statement*>* statements);
@@ -487,8 +486,7 @@ class CodeGenerator: public AstVisitor {
Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
void ProcessDeclarations(ZoneList<Declaration*>* declarations);
- Handle<Code> ComputeCallInitialize(int argc);
- Handle<Code> ComputeCallInitializeInLoop(int argc);
+ Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
// Declare global variables and functions in the given array of
// name/value pairs.
@@ -581,14 +579,14 @@ class CodeGenerator: public AstVisitor {
void CodeForSourcePosition(int pos);
#ifdef DEBUG
- // True if the registers are valid for entry to a block. There should be
- // no frame-external references to eax, ebx, ecx, edx, or edi.
+ // True if the registers are valid for entry to a block. There should
+ // be no frame-external references to (non-reserved) registers.
bool HasValidEntryRegisters();
#endif
bool is_eval_; // Tells whether code is generated for eval.
Handle<Script> script_;
- List<DeferredCode*> deferred_;
+ ZoneList<DeferredCode*> deferred_;
// Assembler
MacroAssembler* masm_; // to generate code
diff --git a/deps/v8/src/ia32/cpu-ia32.cc b/deps/v8/src/ia32/cpu-ia32.cc
index 9cd6b10bd..82a556570 100644
--- a/deps/v8/src/ia32/cpu-ia32.cc
+++ b/deps/v8/src/ia32/cpu-ia32.cc
@@ -32,7 +32,8 @@
#include "cpu.h"
#include "macro-assembler.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
void CPU::Setup() {
CpuFeatures::Probe();
@@ -46,7 +47,7 @@ void CPU::FlushICache(void* start, size_t size) {
// is patched on an Intel CPU, the core performing the patching will have its
// own instruction cache updated automatically.
- // If flushing of the instruction cache becomes necessary Windows have the
+ // If flushing of the instruction cache becomes necessary Windows has the
// API function FlushInstructionCache.
}
diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc
index 9503cfca7..9913a39ba 100644
--- a/deps/v8/src/ia32/debug-ia32.cc
+++ b/deps/v8/src/ia32/debug-ia32.cc
@@ -31,7 +31,8 @@
#include "debug.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
#ifdef ENABLE_DEBUGGER_SUPPORT
diff --git a/deps/v8/src/ia32/frames-ia32.cc b/deps/v8/src/ia32/frames-ia32.cc
index 1bc62ec22..dea439f24 100644
--- a/deps/v8/src/ia32/frames-ia32.cc
+++ b/deps/v8/src/ia32/frames-ia32.cc
@@ -29,7 +29,8 @@
#include "frames-inl.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
StackFrame::Type StackFrame::ComputeType(State* state) {
diff --git a/deps/v8/src/ia32/frames-ia32.h b/deps/v8/src/ia32/frames-ia32.h
index f86dbe4c1..aec1f4835 100644
--- a/deps/v8/src/ia32/frames-ia32.h
+++ b/deps/v8/src/ia32/frames-ia32.h
@@ -28,7 +28,8 @@
#ifndef V8_IA32_FRAMES_IA32_H_
#define V8_IA32_FRAMES_IA32_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Register lists
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index 4231bfae7..d7f264d49 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -32,7 +32,8 @@
#include "runtime.h"
#include "stub-cache.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// ----------------------------------------------------------------------------
// Static IC stub generators.
@@ -426,7 +427,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Probe the stub cache.
Code::Flags flags =
- Code::ComputeFlags(Code::CALL_IC, MONOMORPHIC, NORMAL, argc);
+ Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
StubCache::GenerateProbe(masm, flags, edx, ecx, ebx);
// If the stub cache probing failed, the receiver might be a value.
@@ -635,7 +636,9 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
__ mov(eax, Operand(esp, kPointerSize));
// Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
+ Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
+ NOT_IN_LOOP,
+ MONOMORPHIC);
StubCache::GenerateProbe(masm, flags, eax, ecx, ebx);
// Cache miss: Jump to runtime.
@@ -838,7 +841,9 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// Get the receiver from the stack and probe the stub cache.
__ mov(edx, Operand(esp, 4));
- Code::Flags flags = Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC);
+ Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
+ NOT_IN_LOOP,
+ MONOMORPHIC);
StubCache::GenerateProbe(masm, flags, edx, ecx, ebx);
// Cache miss: Jump to runtime.
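
The extra NOT_IN_LOOP argument threads a new bit through the flags word that keys the stub cache. A rough standalone model of such an encoding is below; the field widths are invented for illustration and V8's real layout differs.

#include <cassert>

enum Kind { CALL_IC, LOAD_IC, STORE_IC };
enum InLoopFlag { NOT_IN_LOOP = 0, IN_LOOP = 1 };
enum State { MONOMORPHIC };

// Invented layout: kind in bits 0-3, in_loop in bit 4, state in bits 5-7,
// argc in the high bits.
static unsigned ComputeFlags(Kind kind, InLoopFlag in_loop, State state,
                             int argc = 0) {
  return static_cast<unsigned>(kind) |
         (static_cast<unsigned>(in_loop) << 4) |
         (static_cast<unsigned>(state) << 5) |
         (static_cast<unsigned>(argc) << 8);
}

int main() {
  // Two otherwise-identical ICs get distinct cache keys once the
  // in-loop bit participates in the flags.
  assert(ComputeFlags(CALL_IC, NOT_IN_LOOP, MONOMORPHIC, 2) !=
         ComputeFlags(CALL_IC, IN_LOOP, MONOMORPHIC, 2));
  return 0;
}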
diff --git a/deps/v8/src/ia32/jump-target-ia32.cc b/deps/v8/src/ia32/jump-target-ia32.cc
index 6c7d6e35b..9644a16aa 100644
--- a/deps/v8/src/ia32/jump-target-ia32.cc
+++ b/deps/v8/src/ia32/jump-target-ia32.cc
@@ -28,46 +28,51 @@
#include "v8.h"
#include "codegen-inl.h"
+#include "jump-target-inl.h"
#include "register-allocator-inl.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// -------------------------------------------------------------------------
// JumpTarget implementation.
-#define __ ACCESS_MASM(masm_)
+#define __ ACCESS_MASM(cgen()->masm())
void JumpTarget::DoJump() {
- ASSERT(cgen_ != NULL);
- ASSERT(cgen_->has_valid_frame());
+ ASSERT(cgen()->has_valid_frame());
// Live non-frame registers are not allowed at unconditional jumps
// because we have no way of invalidating the corresponding results
// which are still live in the C++ code.
- ASSERT(cgen_->HasValidEntryRegisters());
+ ASSERT(cgen()->HasValidEntryRegisters());
if (is_bound()) {
// Backward jump. There is an expected frame to merge to.
ASSERT(direction_ == BIDIRECTIONAL);
- cgen_->frame()->MergeTo(entry_frame_);
- cgen_->DeleteFrame();
+ cgen()->frame()->PrepareMergeTo(entry_frame_);
+ cgen()->frame()->MergeTo(entry_frame_);
+ cgen()->DeleteFrame();
+ __ jmp(&entry_label_);
+ } else if (entry_frame_ != NULL) {
+ // Forward jump with a preconfigured entry frame. Assert the
+ // current frame matches the expected one and jump to the block.
+ ASSERT(cgen()->frame()->Equals(entry_frame_));
+ cgen()->DeleteFrame();
__ jmp(&entry_label_);
} else {
- // Forward jump. The current frame is added to the end of the list
- // of frames reaching the target block and a jump to the merge code
- // is emitted.
- AddReachingFrame(cgen_->frame());
+ // Forward jump. Remember the current frame and emit a jump to
+ // its merge code.
+ AddReachingFrame(cgen()->frame());
RegisterFile empty;
- cgen_->SetFrame(NULL, &empty);
+ cgen()->SetFrame(NULL, &empty);
__ jmp(&merge_labels_.last());
}
-
- is_linked_ = !is_bound_;
}
void JumpTarget::DoBranch(Condition cc, Hint hint) {
- ASSERT(cgen_ != NULL);
- ASSERT(cgen_->has_valid_frame());
+ ASSERT(cgen() != NULL);
+ ASSERT(cgen()->has_valid_frame());
if (is_bound()) {
ASSERT(direction_ == BIDIRECTIONAL);
@@ -77,29 +82,29 @@ void JumpTarget::DoBranch(Condition cc, Hint hint) {
// Swap the current frame for a copy (we do the swapping to get
// the off-frame registers off the fall through) to use for the
// branch.
- VirtualFrame* fall_through_frame = cgen_->frame();
+ VirtualFrame* fall_through_frame = cgen()->frame();
VirtualFrame* branch_frame = new VirtualFrame(fall_through_frame);
- RegisterFile non_frame_registers = RegisterAllocator::Reserved();
- cgen_->SetFrame(branch_frame, &non_frame_registers);
+ RegisterFile non_frame_registers;
+ cgen()->SetFrame(branch_frame, &non_frame_registers);
// Check if we can avoid merge code.
- cgen_->frame()->PrepareMergeTo(entry_frame_);
- if (cgen_->frame()->Equals(entry_frame_)) {
+ cgen()->frame()->PrepareMergeTo(entry_frame_);
+ if (cgen()->frame()->Equals(entry_frame_)) {
// Branch right in to the block.
- cgen_->DeleteFrame();
+ cgen()->DeleteFrame();
__ j(cc, &entry_label_, hint);
- cgen_->SetFrame(fall_through_frame, &non_frame_registers);
+ cgen()->SetFrame(fall_through_frame, &non_frame_registers);
return;
}
// Check if we can reuse existing merge code.
for (int i = 0; i < reaching_frames_.length(); i++) {
if (reaching_frames_[i] != NULL &&
- cgen_->frame()->Equals(reaching_frames_[i])) {
+ cgen()->frame()->Equals(reaching_frames_[i])) {
// Branch to the merge code.
- cgen_->DeleteFrame();
+ cgen()->DeleteFrame();
__ j(cc, &merge_labels_[i], hint);
- cgen_->SetFrame(fall_through_frame, &non_frame_registers);
+ cgen()->SetFrame(fall_through_frame, &non_frame_registers);
return;
}
}
@@ -108,21 +113,30 @@ void JumpTarget::DoBranch(Condition cc, Hint hint) {
// around the merge code on the fall through path.
Label original_fall_through;
__ j(NegateCondition(cc), &original_fall_through, NegateHint(hint));
- cgen_->frame()->MergeTo(entry_frame_);
- cgen_->DeleteFrame();
+ cgen()->frame()->MergeTo(entry_frame_);
+ cgen()->DeleteFrame();
__ jmp(&entry_label_);
- cgen_->SetFrame(fall_through_frame, &non_frame_registers);
+ cgen()->SetFrame(fall_through_frame, &non_frame_registers);
__ bind(&original_fall_through);
+ } else if (entry_frame_ != NULL) {
+ // Forward branch with a preconfigured entry frame. Assert the
+ // current frame matches the expected one and branch to the block.
+ ASSERT(cgen()->frame()->Equals(entry_frame_));
+ // Explicitly use the macro assembler instead of __ as forward
+ // branches are expected to be a fixed size (no inserted
+ // coverage-checking instructions please). This is used in
+ // Reference::GetValue.
+ cgen()->masm()->j(cc, &entry_label_, hint);
+
} else {
- // Forward branch. A copy of the current frame is added to the end of the
- // list of frames reaching the target block and a branch to the merge code
- // is emitted. Use masm_-> instead of __ as forward branches are expected
- // to be a fixed size (no inserted coverage-checking instructions please).
- // This is used in Reference::GetValue.
- AddReachingFrame(new VirtualFrame(cgen_->frame()));
- masm_->j(cc, &merge_labels_.last(), hint);
- is_linked_ = true;
+ // Forward branch. A copy of the current frame is remembered and
+ // a branch to the merge code is emitted. Explicitly use the
+ // macro assembler instead of __ as forward branches are expected
+ // to be a fixed size (no inserted coverage-checking instructions
+ // please). This is used in Reference::GetValue.
+ AddReachingFrame(new VirtualFrame(cgen()->frame()));
+ cgen()->masm()->j(cc, &merge_labels_.last(), hint);
}
}
@@ -134,82 +148,107 @@ void JumpTarget::Call() {
// at the label (which should be the only one) is the spilled current
// frame plus an in-memory return address. The "fall-through" frame
// at the return site is the spilled current frame.
- ASSERT(cgen_ != NULL);
- ASSERT(cgen_->has_valid_frame());
+ ASSERT(cgen() != NULL);
+ ASSERT(cgen()->has_valid_frame());
// There are no non-frame references across the call.
- ASSERT(cgen_->HasValidEntryRegisters());
+ ASSERT(cgen()->HasValidEntryRegisters());
ASSERT(!is_linked());
- cgen_->frame()->SpillAll();
- VirtualFrame* target_frame = new VirtualFrame(cgen_->frame());
+ cgen()->frame()->SpillAll();
+ VirtualFrame* target_frame = new VirtualFrame(cgen()->frame());
target_frame->Adjust(1);
+ // We do not expect a call with a preconfigured entry frame.
+ ASSERT(entry_frame_ == NULL);
AddReachingFrame(target_frame);
__ call(&merge_labels_.last());
-
- is_linked_ = !is_bound_;
}
void JumpTarget::DoBind(int mergable_elements) {
- ASSERT(cgen_ != NULL);
+ ASSERT(cgen() != NULL);
ASSERT(!is_bound());
// Live non-frame registers are not allowed at the start of a basic
// block.
- ASSERT(!cgen_->has_valid_frame() || cgen_->HasValidEntryRegisters());
+ ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
- if (direction_ == FORWARD_ONLY) {
- // A simple case: no forward jumps and no possible backward jumps.
- if (!is_linked()) {
- // The stack pointer can be floating above the top of the
- // virtual frame before the bind. Afterward, it should not.
- ASSERT(cgen_->has_valid_frame());
- VirtualFrame* frame = cgen_->frame();
- int difference =
- frame->stack_pointer_ - (frame->elements_.length() - 1);
- if (difference > 0) {
- frame->stack_pointer_ -= difference;
- __ add(Operand(esp), Immediate(difference * kPointerSize));
- }
+ // Fast case: the jump target was manually configured with an entry
+ // frame to use.
+ if (entry_frame_ != NULL) {
+ // Assert no reaching frames to deal with.
+ ASSERT(reaching_frames_.is_empty());
+ ASSERT(!cgen()->has_valid_frame());
- is_bound_ = true;
- return;
+ RegisterFile empty;
+ if (direction_ == BIDIRECTIONAL) {
+ // Copy the entry frame so the original can be used for a
+ // possible backward jump.
+ cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
+ } else {
+ // Take ownership of the entry frame.
+ cgen()->SetFrame(entry_frame_, &empty);
+ entry_frame_ = NULL;
}
+ __ bind(&entry_label_);
+ return;
+ }
- // Another simple case: no fall through, a single forward jump,
- // and no possible backward jumps.
- if (!cgen_->has_valid_frame() && reaching_frames_.length() == 1) {
- // Pick up the only reaching frame, take ownership of it, and
- // use it for the block about to be emitted.
- VirtualFrame* frame = reaching_frames_[0];
- RegisterFile reserved = RegisterAllocator::Reserved();
- cgen_->SetFrame(frame, &reserved);
- reaching_frames_[0] = NULL;
- __ bind(&merge_labels_[0]);
-
+ if (!is_linked()) {
+ ASSERT(cgen()->has_valid_frame());
+ if (direction_ == FORWARD_ONLY) {
+ // Fast case: no forward jumps and no possible backward jumps.
// The stack pointer can be floating above the top of the
// virtual frame before the bind. Afterward, it should not.
- int difference =
- frame->stack_pointer_ - (frame->elements_.length() - 1);
+ VirtualFrame* frame = cgen()->frame();
+ int difference = frame->stack_pointer_ - (frame->element_count() - 1);
if (difference > 0) {
frame->stack_pointer_ -= difference;
__ add(Operand(esp), Immediate(difference * kPointerSize));
}
+ } else {
+ ASSERT(direction_ == BIDIRECTIONAL);
+ // Fast case: no forward jumps, possible backward ones. Remove
+ // constants and copies above the watermark on the fall-through
+ // frame and use it as the entry frame.
+ cgen()->frame()->MakeMergable(mergable_elements);
+ entry_frame_ = new VirtualFrame(cgen()->frame());
+ }
+ __ bind(&entry_label_);
+ return;
+ }
- is_linked_ = false;
- is_bound_ = true;
- return;
+ if (direction_ == FORWARD_ONLY &&
+ !cgen()->has_valid_frame() &&
+ reaching_frames_.length() == 1) {
+ // Fast case: no fall-through, a single forward jump, and no
+ // possible backward jumps. Pick up the only reaching frame, take
+ // ownership of it, and use it for the block about to be emitted.
+ VirtualFrame* frame = reaching_frames_[0];
+ RegisterFile empty;
+ cgen()->SetFrame(frame, &empty);
+ reaching_frames_[0] = NULL;
+ __ bind(&merge_labels_[0]);
+
+ // The stack pointer can be floating above the top of the
+ // virtual frame before the bind. Afterward, it should not.
+ int difference = frame->stack_pointer_ - (frame->element_count() - 1);
+ if (difference > 0) {
+ frame->stack_pointer_ -= difference;
+ __ add(Operand(esp), Immediate(difference * kPointerSize));
}
+
+ __ bind(&entry_label_);
+ return;
}
// If there is a current frame, record it as the fall-through. It
// is owned by the reaching frames for now.
bool had_fall_through = false;
- if (cgen_->has_valid_frame()) {
+ if (cgen()->has_valid_frame()) {
had_fall_through = true;
- AddReachingFrame(cgen_->frame());
+ AddReachingFrame(cgen()->frame()); // Return value ignored.
RegisterFile empty;
- cgen_->SetFrame(NULL, &empty);
+ cgen()->SetFrame(NULL, &empty);
}
// Compute the frame to use for entry to the block.
@@ -244,17 +283,17 @@ void JumpTarget::DoBind(int mergable_elements) {
// binding site or as the fall through from a previous merge
// code block. Jump around the code we are about to
// generate.
- if (cgen_->has_valid_frame()) {
- cgen_->DeleteFrame();
+ if (cgen()->has_valid_frame()) {
+ cgen()->DeleteFrame();
__ jmp(&entry_label_);
}
// Pick up the frame for this block. Assume ownership if
// there cannot be backward jumps.
- RegisterFile reserved = RegisterAllocator::Reserved();
+ RegisterFile empty;
if (direction_ == BIDIRECTIONAL) {
- cgen_->SetFrame(new VirtualFrame(frame), &reserved);
+ cgen()->SetFrame(new VirtualFrame(frame), &empty);
} else {
- cgen_->SetFrame(frame, &reserved);
+ cgen()->SetFrame(frame, &empty);
reaching_frames_[i] = NULL;
}
__ bind(&merge_labels_[i]);
@@ -263,23 +302,22 @@ void JumpTarget::DoBind(int mergable_elements) {
// looking for any that can share merge code with this one.
for (int j = 0; j < i; j++) {
VirtualFrame* other = reaching_frames_[j];
- if (other != NULL && other->Equals(cgen_->frame())) {
+ if (other != NULL && other->Equals(cgen()->frame())) {
// Set the reaching frame element to null to avoid
// processing it later, and then bind its entry label.
- delete other;
reaching_frames_[j] = NULL;
__ bind(&merge_labels_[j]);
}
}
// Emit the merge code.
- cgen_->frame()->MergeTo(entry_frame_);
+ cgen()->frame()->MergeTo(entry_frame_);
} else if (i == reaching_frames_.length() - 1 && had_fall_through) {
// If this is the fall through frame, and it didn't need
// merge code, we need to pick up the frame so we can jump
// around subsequent merge blocks if necessary.
- RegisterFile reserved = RegisterAllocator::Reserved();
- cgen_->SetFrame(frame, &reserved);
+ RegisterFile empty;
+ cgen()->SetFrame(frame, &empty);
reaching_frames_[i] = NULL;
}
}
@@ -288,22 +326,17 @@ void JumpTarget::DoBind(int mergable_elements) {
// The code generator may not have a current frame if there was no
// fall through and none of the reaching frames needed merging.
// In that case, clone the entry frame as the current frame.
- if (!cgen_->has_valid_frame()) {
- RegisterFile reserved_registers = RegisterAllocator::Reserved();
- cgen_->SetFrame(new VirtualFrame(entry_frame_), &reserved_registers);
+ if (!cgen()->has_valid_frame()) {
+ RegisterFile empty;
+ cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
}
- // There is certainly a current frame equal to the entry frame.
- // Bind the entry frame label.
- __ bind(&entry_label_);
-
// There may be unprocessed reaching frames that did not need
// merge code. They will have unbound merge labels. Bind their
// merge labels to be the same as the entry label and deallocate
// them.
for (int i = 0; i < reaching_frames_.length(); i++) {
if (!merge_labels_[i].is_bound()) {
- delete reaching_frames_[i];
reaching_frames_[i] = NULL;
__ bind(&merge_labels_[i]);
}
@@ -320,15 +353,13 @@ void JumpTarget::DoBind(int mergable_elements) {
// Use a copy of the reaching frame so the original can be saved
// for possible reuse as a backward merge block.
- RegisterFile reserved = RegisterAllocator::Reserved();
- cgen_->SetFrame(new VirtualFrame(reaching_frames_[0]), &reserved);
+ RegisterFile empty;
+ cgen()->SetFrame(new VirtualFrame(reaching_frames_[0]), &empty);
__ bind(&merge_labels_[0]);
- cgen_->frame()->MergeTo(entry_frame_);
- __ bind(&entry_label_);
+ cgen()->frame()->MergeTo(entry_frame_);
}
- is_linked_ = false;
- is_bound_ = true;
+ __ bind(&entry_label_);
}
#undef __
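
As a rough standalone model of the bind-time control flow above: a jump target collects the frames that reach it and, when bound, either adopts a preconfigured entry frame, the fall-through frame, or the single reaching frame. The sketch keeps only that bookkeeping; the real JumpTarget also emits merge code and binds labels.

#include <cassert>
#include <vector>

struct VirtualFrame { int height; };

// Model of the DoBind fast cases only (no code emission).
struct JumpTargetModel {
  VirtualFrame* entry_frame = nullptr;         // Preconfigured, if any.
  std::vector<VirtualFrame*> reaching_frames;  // From forward jumps.

  VirtualFrame* Bind(VirtualFrame* fall_through) {
    if (entry_frame != nullptr) {
      // Fast case: manually configured entry frame, nothing to merge.
      assert(reaching_frames.empty() && fall_through == nullptr);
      return entry_frame;
    }
    if (reaching_frames.empty()) {
      // No forward jumps: the fall-through frame becomes the entry frame.
      assert(fall_through != nullptr);
      return fall_through;
    }
    if (fall_through == nullptr && reaching_frames.size() == 1) {
      // Single forward jump, no fall-through: adopt the only frame.
      return reaching_frames[0];
    }
    // General case: merge code would be emitted here.
    return reaching_frames[0];
  }
};

int main() {
  VirtualFrame f{3};
  JumpTargetModel t;
  t.reaching_frames.push_back(&f);
  assert(t.Bind(nullptr) == &f);
  return 0;
}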
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index d6d5800fe..7636c4ed8 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -33,7 +33,8 @@
#include "runtime.h"
#include "serialize.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// -------------------------------------------------------------------------
// MacroAssembler implementation.
@@ -560,8 +561,8 @@ Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss) {
+ Register scratch,
+ Label* miss) {
Label same_contexts;
ASSERT(!holder_reg.is(scratch));
@@ -631,7 +632,7 @@ void MacroAssembler::NegativeZeroTest(CodeGenerator* cgen,
Register result,
Register op,
JumpTarget* then_target) {
- JumpTarget ok(cgen);
+ JumpTarget ok;
test(result, Operand(result));
ok.Branch(not_zero, taken);
test(op, Operand(op));
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index cd7a23391..940a8b462 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -30,7 +30,8 @@
#include "assembler.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Forward declaration.
class JumpTarget;
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
index 2a0cefd77..04a5390a2 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -35,7 +35,8 @@
#include "ia32/macro-assembler-ia32.h"
#include "ia32/regexp-macro-assembler-ia32.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
/*
* This assembler uses the following register assignment convention
@@ -1194,10 +1195,11 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
const byte* new_address = StringCharacterPosition(*subject, start_index);
if (start_address != new_address) {
- // If there is a difference, update start and end addresses in the
- // RegExp stack frame to match the new value.
+ // If there is a difference, update the object pointer and start and end
+ // addresses in the RegExp stack frame to match the new value.
const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
int byte_length = end_address - start_address;
+ frame_entry<const String*>(re_frame, kInputString) = *subject;
frame_entry<const byte*>(re_frame, kInputStart) = new_address;
frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
}
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
index 8c5dd24cc..a06700a54 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
@@ -28,7 +28,8 @@
#ifndef V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
#define V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
class RegExpMacroAssemblerIA32: public RegExpMacroAssembler {
public:
diff --git a/deps/v8/src/ia32/register-allocator-ia32-inl.h b/deps/v8/src/ia32/register-allocator-ia32-inl.h
new file mode 100644
index 000000000..ddee472d2
--- /dev/null
+++ b/deps/v8/src/ia32/register-allocator-ia32-inl.h
@@ -0,0 +1,82 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IA32_REGISTER_ALLOCATOR_IA32_INL_H_
+#define V8_IA32_REGISTER_ALLOCATOR_IA32_INL_H_
+
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+bool RegisterAllocator::IsReserved(Register reg) {
+ // The code for this test relies on the order of register codes.
+ return reg.code() >= esp.code() && reg.code() <= esi.code();
+}
+
+
+// The register allocator uses small integers to represent the
+// non-reserved assembler registers. The mapping is:
+
+// eax <-> 0, ebx <-> 1, ecx <-> 2, edx <-> 3, edi <-> 4.
+
+int RegisterAllocator::ToNumber(Register reg) {
+ ASSERT(reg.is_valid() && !IsReserved(reg));
+ static int numbers[] = {
+ 0, // eax
+ 2, // ecx
+ 3, // edx
+ 1, // ebx
+ -1, // esp
+ -1, // ebp
+ -1, // esi
+ 4 // edi
+ };
+ return numbers[reg.code()];
+}
+
+
+Register RegisterAllocator::ToRegister(int num) {
+ ASSERT(num >= 0 && num < kNumRegisters);
+ static Register registers[] = { eax, ebx, ecx, edx, edi };
+ return registers[num];
+}
+
+
+void RegisterAllocator::Initialize() {
+ Reset();
+ // The non-reserved edi register is live on JS function entry.
+ Use(edi); // JS function.
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_IA32_REGISTER_ALLOCATOR_IA32_INL_H_
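
The number<->register mapping above is easy to sanity-check in isolation. A standalone sketch of the same two tables, with plain enum codes standing in for the Register type:

#include <cassert>

// Register codes in hardware order: eax, ecx, edx, ebx, esp, ebp, esi, edi.
enum RegCode { EAX, ECX, EDX, EBX, ESP, EBP, ESI, EDI };

// Allocator numbers indexed by register code; -1 marks the reserved
// esp/ebp/esi registers.
static const int kNumbers[] = { 0, 2, 3, 1, -1, -1, -1, 4 };
// Register codes indexed by allocator number.
static const RegCode kCodes[] = { EAX, EBX, ECX, EDX, EDI };

int main() {
  // Round trip: every allocatable register maps to a number and back.
  for (int num = 0; num < 5; num++) {
    assert(kNumbers[kCodes[num]] == num);
  }
  return 0;
}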
diff --git a/deps/v8/src/ia32/register-allocator-ia32.cc b/deps/v8/src/ia32/register-allocator-ia32.cc
index b72d76556..2914960ea 100644
--- a/deps/v8/src/ia32/register-allocator-ia32.cc
+++ b/deps/v8/src/ia32/register-allocator-ia32.cc
@@ -30,7 +30,8 @@
#include "codegen-inl.h"
#include "register-allocator-inl.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// -------------------------------------------------------------------------
// Result implementation.
@@ -38,12 +39,13 @@ namespace v8 { namespace internal {
void Result::ToRegister() {
ASSERT(is_valid());
if (is_constant()) {
- Result fresh = cgen_->allocator()->Allocate();
+ Result fresh = CodeGeneratorScope::Current()->allocator()->Allocate();
ASSERT(fresh.is_valid());
- if (cgen_->IsUnsafeSmi(handle())) {
- cgen_->LoadUnsafeSmi(fresh.reg(), handle());
+ if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) {
+ CodeGeneratorScope::Current()->LoadUnsafeSmi(fresh.reg(), handle());
} else {
- cgen_->masm()->Set(fresh.reg(), Immediate(handle()));
+ CodeGeneratorScope::Current()->masm()->Set(fresh.reg(),
+ Immediate(handle()));
}
// This result becomes a copy of the fresh one.
*this = fresh;
@@ -55,23 +57,24 @@ void Result::ToRegister() {
void Result::ToRegister(Register target) {
ASSERT(is_valid());
if (!is_register() || !reg().is(target)) {
- Result fresh = cgen_->allocator()->Allocate(target);
+ Result fresh = CodeGeneratorScope::Current()->allocator()->Allocate(target);
ASSERT(fresh.is_valid());
if (is_register()) {
- cgen_->masm()->mov(fresh.reg(), reg());
+ CodeGeneratorScope::Current()->masm()->mov(fresh.reg(), reg());
} else {
ASSERT(is_constant());
- if (cgen_->IsUnsafeSmi(handle())) {
- cgen_->LoadUnsafeSmi(fresh.reg(), handle());
+ if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) {
+ CodeGeneratorScope::Current()->LoadUnsafeSmi(fresh.reg(), handle());
} else {
- cgen_->masm()->Set(fresh.reg(), Immediate(handle()));
+ CodeGeneratorScope::Current()->masm()->Set(fresh.reg(),
+ Immediate(handle()));
}
}
*this = fresh;
} else if (is_register() && reg().is(target)) {
- ASSERT(cgen_->has_valid_frame());
- cgen_->frame()->Spill(target);
- ASSERT(cgen_->allocator()->count(target) == 1);
+ ASSERT(CodeGeneratorScope::Current()->has_valid_frame());
+ CodeGeneratorScope::Current()->frame()->Spill(target);
+ ASSERT(CodeGeneratorScope::Current()->allocator()->count(target) == 1);
}
ASSERT(is_register());
ASSERT(reg().is(target));
@@ -81,53 +84,13 @@ void Result::ToRegister(Register target) {
// -------------------------------------------------------------------------
// RegisterAllocator implementation.
-RegisterFile RegisterAllocator::Reserved() {
- RegisterFile reserved;
- reserved.Use(esp);
- reserved.Use(ebp);
- reserved.Use(esi);
- return reserved;
-}
-
-
-void RegisterAllocator::UnuseReserved(RegisterFile* register_file) {
- register_file->ref_counts_[esp.code()] = 0;
- register_file->ref_counts_[ebp.code()] = 0;
- register_file->ref_counts_[esi.code()] = 0;
-}
-
-
-bool RegisterAllocator::IsReserved(int reg_code) {
- // Test below relies on the order of register codes.
- return reg_code >= esp.code() && reg_code <= esi.code();
-}
-
-
-void RegisterAllocator::Initialize() {
- Reset();
- // The following register is live on function entry, saved in the
- // frame, and available for allocation during execution.
- Use(edi); // JS function.
-}
-
-
-void RegisterAllocator::Reset() {
- registers_.Reset();
- // The following registers are live on function entry and reserved
- // during execution.
- Use(esp); // Stack pointer.
- Use(ebp); // Frame pointer (caller's frame pointer on entry).
- Use(esi); // Context (callee's context on entry).
-}
-
-
Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
Result result = AllocateWithoutSpilling();
// Check that the register is a byte register. If not, unuse the
// register if valid and return an invalid result.
if (result.is_valid() && !result.reg().is_byte_register()) {
result.Unuse();
- return Result(cgen_);
+ return Result();
}
return result;
}
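
Result values no longer carry a code-generator back-pointer; they reach it through CodeGeneratorScope::Current(). A minimal sketch of that pattern, assuming (the diff does not show it here) that the scope is a simple RAII wrapper around a static current pointer:

#include <cassert>

class CodeGenerator { /* ... */ };

// Scope class exposing a static "current" pointer, so values like Result
// need not store a back-pointer themselves.
class CodeGeneratorScope {
 public:
  explicit CodeGeneratorScope(CodeGenerator* cgen) : previous_(current_) {
    current_ = cgen;
  }
  ~CodeGeneratorScope() { current_ = previous_; }  // Restore on exit.
  static CodeGenerator* Current() {
    assert(current_ != nullptr);
    return current_;
  }
 private:
  static CodeGenerator* current_;
  CodeGenerator* previous_;  // Supports nested scopes.
};

CodeGenerator* CodeGeneratorScope::current_ = nullptr;

int main() {
  CodeGenerator cgen;
  CodeGeneratorScope scope(&cgen);
  assert(CodeGeneratorScope::Current() == &cgen);
  return 0;
}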
diff --git a/deps/v8/src/ia32/register-allocator-ia32.h b/deps/v8/src/ia32/register-allocator-ia32.h
new file mode 100644
index 000000000..e7ce91f4c
--- /dev/null
+++ b/deps/v8/src/ia32/register-allocator-ia32.h
@@ -0,0 +1,43 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IA32_REGISTER_ALLOCATOR_IA32_H_
+#define V8_IA32_REGISTER_ALLOCATOR_IA32_H_
+
+namespace v8 {
+namespace internal {
+
+class RegisterAllocatorConstants : public AllStatic {
+ public:
+ static const int kNumRegisters = 5;
+ static const int kInvalidRegister = -1;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_IA32_REGISTER_ALLOCATOR_IA32_H_
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index bdfc3d6c3..b31f7062c 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -31,7 +31,8 @@
#include "codegen-inl.h"
#include "stub-cache.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
#define __ ACCESS_MASM(masm)
@@ -58,7 +59,7 @@ static void ProbeTable(MacroAssembler* masm,
// Check that the flags match what we're looking for.
__ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
- __ and_(offset, ~Code::kFlagsTypeMask);
+ __ and_(offset, ~Code::kFlagsNotUsedInLookup);
__ cmp(offset, flags);
__ j(not_equal, &miss);
@@ -321,6 +322,7 @@ void StubCompiler::GenerateLoadConstant(MacroAssembler* masm,
void StubCompiler::GenerateLoadInterceptor(MacroAssembler* masm,
JSObject* object,
JSObject* holder,
+ Smi* lookup_hint,
Register receiver,
Register name,
Register scratch1,
@@ -339,12 +341,15 @@ void StubCompiler::GenerateLoadInterceptor(MacroAssembler* masm,
__ push(receiver); // receiver
__ push(reg); // holder
__ push(name); // name
+ // TODO(367): Maybe don't push lookup_hint for LOOKUP_IN_HOLDER and/or
+ // LOOKUP_IN_PROTOTYPE, but use a special version of the lookup method?
+ __ push(Immediate(lookup_hint));
__ push(scratch2); // restore return address
// Do tail-call to the runtime system.
ExternalReference load_ic_property =
ExternalReference(IC_Utility(IC::kLoadInterceptorProperty));
- __ TailCallRuntime(load_ic_property, 3);
+ __ TailCallRuntime(load_ic_property, 4);
}
@@ -470,7 +475,9 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
Object* CallStubCompiler::CompileCallField(Object* object,
JSObject* holder,
int index,
- String* name) {
+ String* name,
+ Code::Flags flags) {
+ ASSERT_EQ(FIELD, Code::ExtractTypeFromFlags(flags));
// ----------- S t a t e -------------
// -----------------------------------
Label miss;
@@ -511,14 +518,16 @@ Object* CallStubCompiler::CompileCallField(Object* object,
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(FIELD, name);
+ return GetCodeWithFlags(flags, name);
}
Object* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,
- CheckType check) {
+ CheckType check,
+ Code::Flags flags) {
+ ASSERT_EQ(CONSTANT_FUNCTION, Code::ExtractTypeFromFlags(flags));
// ----------- S t a t e -------------
// -----------------------------------
Label miss;
@@ -633,7 +642,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
if (function->shared()->name()->IsString()) {
function_name = String::cast(function->shared()->name());
}
- return GetCode(CONSTANT_FUNCTION, function_name);
+ return GetCodeWithFlags(flags, function_name);
}
@@ -665,11 +674,12 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
__ push(edx); // receiver
__ push(reg); // holder
__ push(Operand(ebp, (argc + 3) * kPointerSize)); // name
+ __ push(Immediate(holder->InterceptorPropertyLookupHint(name)));
// Perform call.
ExternalReference load_interceptor =
ExternalReference(IC_Utility(IC::kLoadInterceptorProperty));
- __ mov(eax, Immediate(3));
+ __ mov(eax, Immediate(4));
__ mov(ebx, Immediate(load_interceptor));
CEntryStub stub;
@@ -969,7 +979,18 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
Label miss;
__ mov(eax, (Operand(esp, kPointerSize)));
- GenerateLoadInterceptor(masm(), receiver, holder, eax, ecx, edx, ebx, &miss);
+ // TODO(368): Compile in the whole chain: all the interceptors in
+ // the prototypes and the ultimate answer.
+ GenerateLoadInterceptor(masm(),
+ receiver,
+ holder,
+ holder->InterceptorPropertyLookupHint(name),
+ eax,
+ ecx,
+ edx,
+ ebx,
+ &miss);
+
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1084,7 +1105,15 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
__ j(not_equal, &miss, not_taken);
- GenerateLoadInterceptor(masm(), receiver, holder, ecx, eax, edx, ebx, &miss);
+ GenerateLoadInterceptor(masm(),
+ receiver,
+ holder,
+ Smi::FromInt(JSObject::kLookupInHolder),
+ ecx,
+ eax,
+ edx,
+ ebx,
+ &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_interceptor, 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
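
The probe change earlier in this file masks off every flag bit that does not participate in lookup before comparing, rather than only the type field. In C terms the test reduces to something like the following; the mask value is invented for illustration, since V8 defines the real constants.

#include <cassert>

// Invented bit mask for illustration only.
static const unsigned kFlagsNotUsedInLookup = 0xF0u;

// A cached code object hits if its flags, minus the bits irrelevant to
// lookup, equal the probe flags.
static bool ProbeHits(unsigned code_flags, unsigned probe_flags) {
  return (code_flags & ~kFlagsNotUsedInLookup) == probe_flags;
}

int main() {
  // Bits inside the ignored range do not affect the comparison.
  assert(ProbeHits(0x3Au, 0x0Au));
  assert(!ProbeHits(0x3Bu, 0x0Au));
  return 0;
}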
diff --git a/deps/v8/src/ia32/virtual-frame-ia32.cc b/deps/v8/src/ia32/virtual-frame-ia32.cc
index ff9f60cdf..5f85de70d 100644
--- a/deps/v8/src/ia32/virtual-frame-ia32.cc
+++ b/deps/v8/src/ia32/virtual-frame-ia32.cc
@@ -31,29 +31,23 @@
#include "register-allocator-inl.h"
#include "scopes.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
-#define __ ACCESS_MASM(masm_)
+#define __ ACCESS_MASM(masm())
// -------------------------------------------------------------------------
// VirtualFrame implementation.
// On entry to a function, the virtual frame already contains the receiver,
// the parameters, and a return address. All frame elements are in memory.
-VirtualFrame::VirtualFrame(CodeGenerator* cgen)
- : cgen_(cgen),
- masm_(cgen->masm()),
- elements_(cgen->scope()->num_parameters()
- + cgen->scope()->num_stack_slots()
- + kPreallocatedElements),
- parameter_count_(cgen->scope()->num_parameters()),
- local_count_(0),
- stack_pointer_(parameter_count_ + 1), // 0-based index of TOS.
- frame_pointer_(kIllegalIndex) {
- for (int i = 0; i < parameter_count_ + 2; i++) {
+VirtualFrame::VirtualFrame()
+ : elements_(parameter_count() + local_count() + kPreallocatedElements),
+ stack_pointer_(parameter_count() + 1) { // 0-based index of TOS.
+ for (int i = 0; i <= stack_pointer_; i++) {
elements_.Add(FrameElement::MemoryElement());
}
- for (int i = 0; i < kNumRegisters; i++) {
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
register_locations_[i] = kIllegalIndex;
}
}
@@ -80,10 +74,10 @@ void VirtualFrame::SyncElementBelowStackPointer(int index) {
break;
case FrameElement::CONSTANT:
- if (cgen_->IsUnsafeSmi(element.handle())) {
- Result temp = cgen_->allocator()->Allocate();
+ if (cgen()->IsUnsafeSmi(element.handle())) {
+ Result temp = cgen()->allocator()->Allocate();
ASSERT(temp.is_valid());
- cgen_->LoadUnsafeSmi(temp.reg(), element.handle());
+ cgen()->LoadUnsafeSmi(temp.reg(), element.handle());
__ mov(Operand(ebp, fp_relative(index)), temp.reg());
} else {
__ Set(Operand(ebp, fp_relative(index)),
@@ -95,7 +89,7 @@ void VirtualFrame::SyncElementBelowStackPointer(int index) {
int backing_index = element.index();
FrameElement backing_element = elements_[backing_index];
if (backing_element.is_memory()) {
- Result temp = cgen_->allocator()->Allocate();
+ Result temp = cgen()->allocator()->Allocate();
ASSERT(temp.is_valid());
__ mov(temp.reg(), Operand(ebp, fp_relative(backing_index)));
__ mov(Operand(ebp, fp_relative(index)), temp.reg());
@@ -132,10 +126,10 @@ void VirtualFrame::SyncElementByPushing(int index) {
break;
case FrameElement::CONSTANT:
- if (cgen_->IsUnsafeSmi(element.handle())) {
- Result temp = cgen_->allocator()->Allocate();
+ if (cgen()->IsUnsafeSmi(element.handle())) {
+ Result temp = cgen()->allocator()->Allocate();
ASSERT(temp.is_valid());
- cgen_->LoadUnsafeSmi(temp.reg(), element.handle());
+ cgen()->LoadUnsafeSmi(temp.reg(), element.handle());
__ push(temp.reg());
} else {
__ push(Immediate(element.handle()));
@@ -162,7 +156,7 @@ void VirtualFrame::SyncElementByPushing(int index) {
// [min(stack_pointer_ + 1, begin), end].
void VirtualFrame::SyncRange(int begin, int end) {
ASSERT(begin >= 0);
- ASSERT(end < elements_.length());
+ ASSERT(end < element_count());
// Sync elements below the range if they have not been materialized
// on the stack.
int start = Min(begin, stack_pointer_ + 1);
@@ -180,11 +174,75 @@ void VirtualFrame::SyncRange(int begin, int end) {
}
+void VirtualFrame::MakeMergable(int mergable_elements) {
+ if (mergable_elements == JumpTarget::kAllElements) {
+ mergable_elements = element_count();
+ }
+ ASSERT(mergable_elements <= element_count());
+
+ int start_index = element_count() - mergable_elements;
+ for (int i = start_index; i < element_count(); i++) {
+ FrameElement element = elements_[i];
+
+ if (element.is_constant() || element.is_copy()) {
+ if (element.is_synced()) {
+ // Just spill.
+ elements_[i] = FrameElement::MemoryElement();
+ } else {
+ // Allocate to a register.
+ FrameElement backing_element; // Invalid if not a copy.
+ if (element.is_copy()) {
+ backing_element = elements_[element.index()];
+ }
+ Result fresh = cgen()->allocator()->Allocate();
+ ASSERT(fresh.is_valid());
+ elements_[i] =
+ FrameElement::RegisterElement(fresh.reg(),
+ FrameElement::NOT_SYNCED);
+ Use(fresh.reg(), i);
+
+ // Emit a move.
+ if (element.is_constant()) {
+ if (cgen()->IsUnsafeSmi(element.handle())) {
+ cgen()->LoadUnsafeSmi(fresh.reg(), element.handle());
+ } else {
+ __ Set(fresh.reg(), Immediate(element.handle()));
+ }
+ } else {
+ ASSERT(element.is_copy());
+ // Copies are only backed by register or memory locations.
+ if (backing_element.is_register()) {
+ // The backing store may have been spilled by allocating,
+ // but that's OK. If it was, the value is right where we
+ // want it.
+ if (!fresh.reg().is(backing_element.reg())) {
+ __ mov(fresh.reg(), backing_element.reg());
+ }
+ } else {
+ ASSERT(backing_element.is_memory());
+ __ mov(fresh.reg(), Operand(ebp, fp_relative(element.index())));
+ }
+ }
+ }
+ // No need to set the copied flag---there are no copies of
+ // copies or constants so the original was not copied.
+ elements_[i].set_static_type(element.static_type());
+ } else {
+ // Clear the copy flag of non-constant, non-copy elements above
+ // the high water mark. They cannot be copied because copies are
+ // always higher than their backing store and copies are not
+ // allowed above the water mark.
+ elements_[i].clear_copied();
+ }
+ }
+}
+
+
void VirtualFrame::MergeTo(VirtualFrame* expected) {
- Comment cmnt(masm_, "[ Merge frame");
+ Comment cmnt(masm(), "[ Merge frame");
// We should always be merging the code generator's current frame to an
// expected frame.
- ASSERT(cgen_->frame() == this);
+ ASSERT(cgen()->frame() == this);
// Adjust the stack pointer upward (toward the top of the virtual
// frame) if necessary.
@@ -198,23 +256,6 @@ void VirtualFrame::MergeTo(VirtualFrame* expected) {
MergeMoveRegistersToRegisters(expected);
MergeMoveMemoryToRegisters(expected);
- // Fix any sync flag problems from the bottom-up and make the copied
- // flags exact. This assumes that the backing store of copies is
- // always lower in the frame.
- for (int i = 0; i < elements_.length(); i++) {
- FrameElement source = elements_[i];
- FrameElement target = expected->elements_[i];
- if (source.is_synced() && !target.is_synced()) {
- elements_[i].clear_sync();
- } else if (!source.is_synced() && target.is_synced()) {
- SyncElementAt(i);
- }
- elements_[i].clear_copied();
- if (elements_[i].is_copy()) {
- elements_[elements_[i].index()].set_copied();
- }
- }
-
// Adjust the stack pointer downward if necessary.
if (stack_pointer_ > expected->stack_pointer_) {
int difference = stack_pointer_ - expected->stack_pointer_;
@@ -240,13 +281,9 @@ void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
// of the index of the frame element esi is caching or kIllegalIndex
// if esi has not been disturbed.
int esi_caches = kIllegalIndex;
- // A "singleton" memory element.
- FrameElement memory_element = FrameElement::MemoryElement();
- // Loop downward from the stack pointer or the top of the frame if
- // the stack pointer is floating above the frame.
- int start = Min(stack_pointer_, elements_.length() - 1);
- for (int i = start; i >= 0; i--) {
+ for (int i = element_count() - 1; i >= 0; i--) {
FrameElement target = expected->elements_[i];
+ if (target.is_register()) continue; // Handle registers later.
if (target.is_memory()) {
FrameElement source = elements_[i];
switch (source.type()) {
@@ -268,9 +305,9 @@ void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
case FrameElement::CONSTANT:
if (!source.is_synced()) {
- if (cgen_->IsUnsafeSmi(source.handle())) {
+ if (cgen()->IsUnsafeSmi(source.handle())) {
esi_caches = i;
- cgen_->LoadUnsafeSmi(esi, source.handle());
+ cgen()->LoadUnsafeSmi(esi, source.handle());
__ mov(Operand(ebp, fp_relative(i)), esi);
} else {
__ Set(Operand(ebp, fp_relative(i)), Immediate(source.handle()));
@@ -296,8 +333,8 @@ void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
}
break;
}
- elements_[i] = memory_element;
}
+ elements_[i] = target;
}
if (esi_caches != kIllegalIndex) {
@@ -310,64 +347,77 @@ void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
// We have already done X-to-memory moves.
ASSERT(stack_pointer_ >= expected->stack_pointer_);
- for (int i = 0; i < kNumRegisters; i++) {
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
// Move the right value into register i if it is currently in a register.
- int index = expected->register_locations_[i];
- int use_index = register_locations_[i];
- // Fast check if register is unused in target or already correct
- if (index != kIllegalIndex
- && index != use_index
- && elements_[index].is_register()) {
- Register source = elements_[index].reg();
- Register target = { i };
+ int index = expected->register_location(i);
+ int use_index = register_location(i);
+ // Skip if register i is unused in the target or if the source is
+ // not a register (this is not a register-to-register move).
+ if (index == kIllegalIndex || !elements_[index].is_register()) continue;
+
+ Register target = RegisterAllocator::ToRegister(i);
+ Register source = elements_[index].reg();
+ if (index != use_index) {
if (use_index == kIllegalIndex) { // Target is currently unused.
// Copy contents of source from source to target.
// Set frame element register to target.
- elements_[index].set_reg(target);
Use(target, index);
Unuse(source);
__ mov(target, source);
} else {
// Exchange contents of registers source and target.
+ // Nothing except the register backing use_index has changed.
elements_[use_index].set_reg(source);
- elements_[index].set_reg(target);
- register_locations_[target.code()] = index;
- register_locations_[source.code()] = use_index;
+ set_register_location(target, index);
+ set_register_location(source, use_index);
__ xchg(source, target);
}
}
+
+ if (!elements_[index].is_synced() &&
+ expected->elements_[index].is_synced()) {
+ __ mov(Operand(ebp, fp_relative(index)), target);
+ }
+ elements_[index] = expected->elements_[index];
}
}
-void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame *expected) {
+void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame* expected) {
// Move memory, constants, and copies to registers. This is the
- // final step and is done from the bottom up so that the backing
+ // final step. Since it is done in register code order rather than
+ // from the bottom up, special code ensures that the backing
+ // elements of copies are in their correct locations when we
+ // encounter the copies.
- for (int i = 0; i < kNumRegisters; i++) {
- int index = expected->register_locations_[i];
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ int index = expected->register_location(i);
if (index != kIllegalIndex) {
FrameElement source = elements_[index];
FrameElement target = expected->elements_[index];
+ Register target_reg = RegisterAllocator::ToRegister(i);
+ ASSERT(target.reg().is(target_reg));
switch (source.type()) {
case FrameElement::INVALID: // Fall through.
UNREACHABLE();
break;
case FrameElement::REGISTER:
- ASSERT(source.reg().is(target.reg()));
- continue; // Go to next iteration. Skips Use(target.reg()) below.
+ ASSERT(source.Equals(target));
+ // Go to next iteration. Skips Use(target_reg) and syncing
+ // below. It is safe to skip syncing because a target
+ // register frame element would only be synced if all source
+ // elements were.
+ continue;
break;
case FrameElement::MEMORY:
ASSERT(index <= stack_pointer_);
- __ mov(target.reg(), Operand(ebp, fp_relative(index)));
+ __ mov(target_reg, Operand(ebp, fp_relative(index)));
break;
case FrameElement::CONSTANT:
- if (cgen_->IsUnsafeSmi(source.handle())) {
- cgen_->LoadUnsafeSmi(target.reg(), source.handle());
+ if (cgen()->IsUnsafeSmi(source.handle())) {
+ cgen()->LoadUnsafeSmi(target_reg, source.handle());
} else {
- __ Set(target.reg(), Immediate(source.handle()));
+ __ Set(target_reg, Immediate(source.handle()));
}
break;
@@ -387,21 +437,20 @@ void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame *expected) {
Use(new_backing_reg, backing_index);
__ mov(new_backing_reg,
Operand(ebp, fp_relative(backing_index)));
- __ mov(target.reg(), new_backing_reg);
+ __ mov(target_reg, new_backing_reg);
} else {
- __ mov(target.reg(), Operand(ebp, fp_relative(backing_index)));
+ __ mov(target_reg, Operand(ebp, fp_relative(backing_index)));
}
} else {
- __ mov(target.reg(), backing.reg());
+ __ mov(target_reg, backing.reg());
}
}
}
- // Ensure the proper sync state. If the source was memory no
- // code needs to be emitted.
+ // Ensure the proper sync state.
if (target.is_synced() && !source.is_synced()) {
- __ mov(Operand(ebp, fp_relative(index)), target.reg());
+ __ mov(Operand(ebp, fp_relative(index)), target_reg);
}
- Use(target.reg(), index);
+ Use(target_reg, index);
elements_[index] = target;
}
}
@@ -410,7 +459,7 @@ void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame *expected) {
void VirtualFrame::Enter() {
// Registers live on entry: esp, ebp, esi, edi.
- Comment cmnt(masm_, "[ Enter JS frame");
+ Comment cmnt(masm(), "[ Enter JS frame");
#ifdef DEBUG
// Verify that edi contains a JS function. The following code
@@ -425,7 +474,6 @@ void VirtualFrame::Enter() {
EmitPush(ebp);
- frame_pointer_ = stack_pointer_;
__ mov(ebp, Operand(esp));
// Store the context in the frame. The context is kept in esi and a
@@ -436,13 +484,13 @@ void VirtualFrame::Enter() {
// Store the function in the frame. The frame owns the register
// reference now (ie, it can keep it in edi or spill it later).
Push(edi);
- SyncElementAt(elements_.length() - 1);
- cgen_->allocator()->Unuse(edi);
+ SyncElementAt(element_count() - 1);
+ cgen()->allocator()->Unuse(edi);
}
void VirtualFrame::Exit() {
- Comment cmnt(masm_, "[ Exit JS frame");
+ Comment cmnt(masm(), "[ Exit JS frame");
// Record the location of the JS exit code for patching when setting
// break point.
__ RecordJSReturn();
@@ -452,34 +500,31 @@ void VirtualFrame::Exit() {
// call instruction to support patching the exit code in the
// debugger. See VisitReturnStatement for the full return sequence.
__ mov(esp, Operand(ebp));
- stack_pointer_ = frame_pointer_;
- for (int i = elements_.length() - 1; i > stack_pointer_; i--) {
+ stack_pointer_ = frame_pointer();
+ for (int i = element_count() - 1; i > stack_pointer_; i--) {
FrameElement last = elements_.RemoveLast();
if (last.is_register()) {
Unuse(last.reg());
}
}
- frame_pointer_ = kIllegalIndex;
EmitPop(ebp);
}
-void VirtualFrame::AllocateStackSlots(int count) {
- ASSERT(height() == 0);
- local_count_ = count;
-
+void VirtualFrame::AllocateStackSlots() {
+ int count = local_count();
if (count > 0) {
- Comment cmnt(masm_, "[ Allocate space for locals");
+ Comment cmnt(masm(), "[ Allocate space for locals");
// The locals are initialized to a constant (the undefined value), but
// we sync them with the actual frame to allocate space for spilling
// them later. First sync everything above the stack pointer so we can
// use pushes to allocate and initialize the locals.
- SyncRange(stack_pointer_ + 1, elements_.length() - 1);
+ SyncRange(stack_pointer_ + 1, element_count() - 1);
Handle<Object> undefined = Factory::undefined_value();
FrameElement initial_value =
FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
- Result temp = cgen_->allocator()->Allocate();
+ Result temp = cgen()->allocator()->Allocate();
ASSERT(temp.is_valid());
__ Set(temp.reg(), Immediate(undefined));
for (int i = 0; i < count; i++) {
@@ -504,7 +549,7 @@ void VirtualFrame::RestoreContextRegister() {
void VirtualFrame::PushReceiverSlotAddress() {
- Result temp = cgen_->allocator()->Allocate();
+ Result temp = cgen()->allocator()->Allocate();
ASSERT(temp.is_valid());
__ lea(temp.reg(), ParameterAt(-1));
Push(&temp);
@@ -518,7 +563,7 @@ int VirtualFrame::InvalidateFrameSlotAt(int index) {
int new_backing_index = kIllegalIndex;
if (original.is_copied()) {
// Verify it is copied, and find first copy.
- for (int i = index + 1; i < elements_.length(); i++) {
+ for (int i = index + 1; i < element_count(); i++) {
if (elements_[i].is_copy() && elements_[i].index() == index) {
new_backing_index = i;
break;
@@ -538,7 +583,7 @@ int VirtualFrame::InvalidateFrameSlotAt(int index) {
// This is the backing store of copies.
Register backing_reg;
if (original.is_memory()) {
- Result fresh = cgen_->allocator()->Allocate();
+ Result fresh = cgen()->allocator()->Allocate();
ASSERT(fresh.is_valid());
Use(fresh.reg(), new_backing_index);
backing_reg = fresh.reg();
@@ -546,7 +591,7 @@ int VirtualFrame::InvalidateFrameSlotAt(int index) {
} else {
// The original was in a register.
backing_reg = original.reg();
- register_locations_[backing_reg.code()] = new_backing_index;
+ set_register_location(backing_reg, new_backing_index);
}
// Invalidate the element at index.
elements_[index] = FrameElement::InvalidElement();
@@ -559,7 +604,7 @@ int VirtualFrame::InvalidateFrameSlotAt(int index) {
FrameElement::RegisterElement(backing_reg, FrameElement::NOT_SYNCED);
}
// Update the other copies.
- for (int i = new_backing_index + 1; i < elements_.length(); i++) {
+ for (int i = new_backing_index + 1; i < element_count(); i++) {
if (elements_[i].is_copy() && elements_[i].index() == index) {
elements_[i].set_index(new_backing_index);
elements_[new_backing_index].set_copied();
@@ -571,7 +616,7 @@ int VirtualFrame::InvalidateFrameSlotAt(int index) {
void VirtualFrame::TakeFrameSlotAt(int index) {
ASSERT(index >= 0);
- ASSERT(index <= elements_.length());
+ ASSERT(index <= element_count());
FrameElement original = elements_[index];
int new_backing_store_index = InvalidateFrameSlotAt(index);
if (new_backing_store_index != kIllegalIndex) {
@@ -583,18 +628,18 @@ void VirtualFrame::TakeFrameSlotAt(int index) {
case FrameElement::MEMORY: {
// Emit code to load the original element's data into a register.
// Push that register as a FrameElement on top of the frame.
- Result fresh = cgen_->allocator()->Allocate();
+ Result fresh = cgen()->allocator()->Allocate();
ASSERT(fresh.is_valid());
FrameElement new_element =
FrameElement::RegisterElement(fresh.reg(),
FrameElement::NOT_SYNCED);
- Use(fresh.reg(), elements_.length());
+ Use(fresh.reg(), element_count());
elements_.Add(new_element);
__ mov(fresh.reg(), Operand(ebp, fp_relative(index)));
break;
}
case FrameElement::REGISTER:
- Use(original.reg(), elements_.length());
+ Use(original.reg(), element_count());
// Fall through.
case FrameElement::CONSTANT:
case FrameElement::COPY:
@@ -613,9 +658,9 @@ void VirtualFrame::StoreToFrameSlotAt(int index) {
// a given index. The value on top of the frame is left in place.
// This is a duplicating operation, so it can create copies.
ASSERT(index >= 0);
- ASSERT(index < elements_.length());
+ ASSERT(index < element_count());
- int top_index = elements_.length() - 1;
+ int top_index = element_count() - 1;
FrameElement top = elements_[top_index];
FrameElement original = elements_[index];
if (top.is_copy() && top.index() == index) return;
@@ -657,12 +702,12 @@ void VirtualFrame::StoreToFrameSlotAt(int index) {
// temp register. Alternatively, allow copies to appear in
// any order in the frame and lazily move the value down to
// the slot.
- Result temp = cgen_->allocator()->Allocate();
+ Result temp = cgen()->allocator()->Allocate();
ASSERT(temp.is_valid());
__ mov(temp.reg(), Operand(ebp, fp_relative(backing_index)));
__ mov(Operand(ebp, fp_relative(index)), temp.reg());
} else {
- register_locations_[backing_element.reg().code()] = index;
+ set_register_location(backing_element.reg(), index);
if (backing_element.is_synced()) {
// If the element is a register, we will not actually move
// anything on the stack but only update the virtual frame
@@ -682,7 +727,7 @@ void VirtualFrame::StoreToFrameSlotAt(int index) {
// All the copies of the old backing element (including the top
// element) become copies of the new backing element.
- for (int i = backing_index + 1; i < elements_.length(); i++) {
+ for (int i = backing_index + 1; i < element_count(); i++) {
if (elements_[i].is_copy() && elements_[i].index() == backing_index) {
elements_[i].set_index(index);
}
@@ -704,12 +749,12 @@ void VirtualFrame::StoreToFrameSlotAt(int index) {
// The sync state of the former top element is correct (synced).
// Emit code to move the value down in the frame.
- Result temp = cgen_->allocator()->Allocate();
+ Result temp = cgen()->allocator()->Allocate();
ASSERT(temp.is_valid());
__ mov(temp.reg(), Operand(esp, 0));
__ mov(Operand(ebp, fp_relative(index)), temp.reg());
} else if (top.is_register()) {
- register_locations_[top.reg().code()] = index;
+ set_register_location(top.reg(), index);
// The stored-to slot has the (unsynced) register reference and
// the top element becomes a copy. The sync state of the top is
// preserved.
@@ -729,7 +774,7 @@ void VirtualFrame::StoreToFrameSlotAt(int index) {
void VirtualFrame::PushTryHandler(HandlerType type) {
- ASSERT(cgen_->HasValidEntryRegisters());
+ ASSERT(cgen()->HasValidEntryRegisters());
// Grow the expression stack by handler size less two (the return address
// is already pushed by a call instruction, and PushTryHandler from the
// macro assembler will leave the top of stack in the eax register to be
@@ -742,9 +787,9 @@ void VirtualFrame::PushTryHandler(HandlerType type) {
Result VirtualFrame::RawCallStub(CodeStub* stub) {
- ASSERT(cgen_->HasValidEntryRegisters());
+ ASSERT(cgen()->HasValidEntryRegisters());
__ CallStub(stub);
- Result result = cgen_->allocator()->Allocate(eax);
+ Result result = cgen()->allocator()->Allocate(eax);
ASSERT(result.is_valid());
return result;
}
@@ -785,9 +830,9 @@ Result VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
Result VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
PrepareForCall(arg_count, arg_count);
- ASSERT(cgen_->HasValidEntryRegisters());
+ ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(f, arg_count);
- Result result = cgen_->allocator()->Allocate(eax);
+ Result result = cgen()->allocator()->Allocate(eax);
ASSERT(result.is_valid());
return result;
}
@@ -795,9 +840,9 @@ Result VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
Result VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
PrepareForCall(arg_count, arg_count);
- ASSERT(cgen_->HasValidEntryRegisters());
+ ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(id, arg_count);
- Result result = cgen_->allocator()->Allocate(eax);
+ Result result = cgen()->allocator()->Allocate(eax);
ASSERT(result.is_valid());
return result;
}
@@ -807,9 +852,9 @@ Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
int arg_count) {
PrepareForCall(arg_count, arg_count);
- ASSERT(cgen_->HasValidEntryRegisters());
+ ASSERT(cgen()->HasValidEntryRegisters());
__ InvokeBuiltin(id, flag);
- Result result = cgen_->allocator()->Allocate(eax);
+ Result result = cgen()->allocator()->Allocate(eax);
ASSERT(result.is_valid());
return result;
}
@@ -817,9 +862,9 @@ Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
RelocInfo::Mode rmode) {
- ASSERT(cgen_->HasValidEntryRegisters());
+ ASSERT(cgen()->HasValidEntryRegisters());
__ call(code, rmode);
- Result result = cgen_->allocator()->Allocate(eax);
+ Result result = cgen()->allocator()->Allocate(eax);
ASSERT(result.is_valid());
return result;
}
@@ -898,9 +943,8 @@ Result VirtualFrame::CallCallIC(RelocInfo::Mode mode,
// Arguments, receiver, and function name are on top of the frame.
// The IC expects them on the stack. It does not drop the function
// name slot (but it does drop the rest).
- Handle<Code> ic = (loop_nesting > 0)
- ? cgen_->ComputeCallInitializeInLoop(arg_count)
- : cgen_->ComputeCallInitialize(arg_count);
+ InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> ic = cgen()->ComputeCallInitialize(arg_count, in_loop);
// Spill args, receiver, and function. The call will drop args and
// receiver.
PrepareForCall(arg_count + 2, arg_count + 1);
@@ -922,7 +966,7 @@ Result VirtualFrame::CallConstructor(int arg_count) {
// Constructors are called with the number of arguments in register
// eax for now. Another option would be to have separate construct
// call trampolines per different arguments counts encountered.
- Result num_args = cgen_->allocator()->Allocate(eax);
+ Result num_args = cgen()->allocator()->Allocate(eax);
ASSERT(num_args.is_valid());
__ Set(num_args.reg(), Immediate(arg_count));
@@ -934,7 +978,7 @@ Result VirtualFrame::CallConstructor(int arg_count) {
void VirtualFrame::Drop(int count) {
ASSERT(height() >= count);
- int num_virtual_elements = (elements_.length() - 1) - stack_pointer_;
+ int num_virtual_elements = (element_count() - 1) - stack_pointer_;
// Emit code to lower the stack pointer if necessary.
if (num_virtual_elements < count) {
@@ -955,14 +999,14 @@ void VirtualFrame::Drop(int count) {
Result VirtualFrame::Pop() {
FrameElement element = elements_.RemoveLast();
- int index = elements_.length();
+ int index = element_count();
ASSERT(element.is_valid());
bool pop_needed = (stack_pointer_ == index);
if (pop_needed) {
stack_pointer_--;
if (element.is_memory()) {
- Result temp = cgen_->allocator()->Allocate();
+ Result temp = cgen()->allocator()->Allocate();
ASSERT(temp.is_valid());
temp.set_static_type(element.static_type());
__ pop(temp.reg());
@@ -989,7 +1033,7 @@ Result VirtualFrame::Pop() {
// Memory elements could only be the backing store of a copy.
// Allocate the original to a register.
ASSERT(index <= stack_pointer_);
- Result temp = cgen_->allocator()->Allocate();
+ Result temp = cgen()->allocator()->Allocate();
ASSERT(temp.is_valid());
Use(temp.reg(), index);
FrameElement new_element =
@@ -999,18 +1043,18 @@ Result VirtualFrame::Pop() {
new_element.set_static_type(element.static_type());
elements_[index] = new_element;
__ mov(temp.reg(), Operand(ebp, fp_relative(index)));
- return Result(temp.reg(), cgen_, element.static_type());
+ return Result(temp.reg(), element.static_type());
} else if (element.is_register()) {
- return Result(element.reg(), cgen_, element.static_type());
+ return Result(element.reg(), element.static_type());
} else {
ASSERT(element.is_constant());
- return Result(element.handle(), cgen_);
+ return Result(element.handle());
}
}
void VirtualFrame::EmitPop(Register reg) {
- ASSERT(stack_pointer_ == elements_.length() - 1);
+ ASSERT(stack_pointer_ == element_count() - 1);
stack_pointer_--;
elements_.RemoveLast();
__ pop(reg);
@@ -1018,7 +1062,7 @@ void VirtualFrame::EmitPop(Register reg) {
void VirtualFrame::EmitPop(Operand operand) {
- ASSERT(stack_pointer_ == elements_.length() - 1);
+ ASSERT(stack_pointer_ == element_count() - 1);
stack_pointer_--;
elements_.RemoveLast();
__ pop(operand);
@@ -1026,7 +1070,7 @@ void VirtualFrame::EmitPop(Operand operand) {
void VirtualFrame::EmitPush(Register reg) {
- ASSERT(stack_pointer_ == elements_.length() - 1);
+ ASSERT(stack_pointer_ == element_count() - 1);
elements_.Add(FrameElement::MemoryElement());
stack_pointer_++;
__ push(reg);
@@ -1034,7 +1078,7 @@ void VirtualFrame::EmitPush(Register reg) {
void VirtualFrame::EmitPush(Operand operand) {
- ASSERT(stack_pointer_ == elements_.length() - 1);
+ ASSERT(stack_pointer_ == element_count() - 1);
elements_.Add(FrameElement::MemoryElement());
stack_pointer_++;
__ push(operand);
@@ -1042,7 +1086,7 @@ void VirtualFrame::EmitPush(Operand operand) {
void VirtualFrame::EmitPush(Immediate immediate) {
- ASSERT(stack_pointer_ == elements_.length() - 1);
+ ASSERT(stack_pointer_ == element_count() - 1);
elements_.Add(FrameElement::MemoryElement());
stack_pointer_++;
__ push(immediate);
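
Every hunk in this file follows the same scheme: the frame no longer caches a cgen_ pointer but looks up the active code generator on demand through cgen(). The enabling class, CodeGeneratorScope, is defined in codegen.h outside this diff; a minimal sketch, assuming the conventional scoped-current-pointer idiom:

    // Sketch only, not the patched source; the real definition lives in
    // codegen.h and is not part of this hunk.
    class CodeGeneratorScope BASE_EMBEDDED {
     public:
      explicit CodeGeneratorScope(CodeGenerator* cgen) {
        previous_ = top_;  // save the outer code generator
        top_ = cgen;       // make this one current
      }
      ~CodeGeneratorScope() { top_ = previous_; }  // restore on scope exit
      static CodeGenerator* Current() {
        ASSERT(top_ != NULL);  // only valid while code generation is active
        return top_;
      }
     private:
      static CodeGenerator* top_;  // innermost active code generator
      CodeGenerator* previous_;
    };

With such a scope on the stack, VirtualFrame::cgen() can simply return CodeGeneratorScope::Current(), which is what the header below declares.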
diff --git a/deps/v8/src/ia32/virtual-frame-ia32.h b/deps/v8/src/ia32/virtual-frame-ia32.h
index 298eda21d..6e6ebd50a 100644
--- a/deps/v8/src/ia32/virtual-frame-ia32.h
+++ b/deps/v8/src/ia32/virtual-frame-ia32.h
@@ -29,8 +29,10 @@
#define V8_IA32_VIRTUAL_FRAME_IA32_H_
#include "register-allocator.h"
+#include "scopes.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// -------------------------------------------------------------------------
// Virtual frames
@@ -41,7 +43,7 @@ namespace v8 { namespace internal {
// as random access to the expression stack elements, locals, and
// parameters.
-class VirtualFrame : public Malloced {
+class VirtualFrame : public ZoneObject {
public:
// A utility class to introduce a scope where the virtual frame is
// expected to remain spilled. The constructor spills the code
@@ -50,42 +52,66 @@ class VirtualFrame : public Malloced {
// generator is being transformed.
class SpilledScope BASE_EMBEDDED {
public:
- explicit SpilledScope(CodeGenerator* cgen);
+ SpilledScope() : previous_state_(cgen()->in_spilled_code()) {
+ ASSERT(cgen()->has_valid_frame());
+ cgen()->frame()->SpillAll();
+ cgen()->set_in_spilled_code(true);
+ }
- ~SpilledScope();
+ ~SpilledScope() {
+ cgen()->set_in_spilled_code(previous_state_);
+ }
private:
- CodeGenerator* cgen_;
bool previous_state_;
+
+ CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
};
// An illegal index into the virtual frame.
static const int kIllegalIndex = -1;
// Construct an initial virtual frame on entry to a JS function.
- explicit VirtualFrame(CodeGenerator* cgen);
+ VirtualFrame();
// Construct a virtual frame as a clone of an existing one.
explicit VirtualFrame(VirtualFrame* original);
+ CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
+ MacroAssembler* masm() { return cgen()->masm(); }
+
// Create a duplicate of an existing valid frame element.
FrameElement CopyElementAt(int index);
+ // The number of elements on the virtual frame.
+ int element_count() { return elements_.length(); }
+
// The height of the virtual expression stack.
- int height() const {
- return elements_.length() - expression_base_index();
+ int height() {
+ return element_count() - expression_base_index();
+ }
+
+ int register_location(int num) {
+ ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
+ return register_locations_[num];
}
- int register_index(Register reg) {
- return register_locations_[reg.code()];
+ int register_location(Register reg) {
+ return register_locations_[RegisterAllocator::ToNumber(reg)];
}
- bool is_used(int reg_code) {
- return register_locations_[reg_code] != kIllegalIndex;
+ void set_register_location(Register reg, int index) {
+ register_locations_[RegisterAllocator::ToNumber(reg)] = index;
+ }
+
+ bool is_used(int num) {
+ ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
+ return register_locations_[num] != kIllegalIndex;
}
bool is_used(Register reg) {
- return is_used(reg.code());
+ return register_locations_[RegisterAllocator::ToNumber(reg)]
+ != kIllegalIndex;
}
// Add extra in-memory elements to the top of the frame to match an actual
@@ -98,7 +124,12 @@ class VirtualFrame : public Malloced {
// match an external frame effect (examples include a call removing
// its arguments, and exiting a try/catch removing an exception
// handler). No code will be emitted.
- void Forget(int count);
+ void Forget(int count) {
+ ASSERT(count >= 0);
+ ASSERT(stack_pointer_ == element_count() - 1);
+ stack_pointer_ -= count;
+ ForgetElements(count);
+ }
// Forget count elements from the top of the frame without adjusting
// the stack pointer downward. This is used, for example, before
@@ -109,13 +140,25 @@ class VirtualFrame : public Malloced {
void SpillAll();
// Spill all occurrences of a specific register from the frame.
- void Spill(Register reg);
+ void Spill(Register reg) {
+ if (is_used(reg)) SpillElementAt(register_location(reg));
+ }
// Spill all occurrences of an arbitrary register if possible. Return the
// register spilled or no_reg if it was not possible to free any register
// (ie, they all have frame-external references).
Register SpillAnyRegister();
+ // Sync the range of elements in [begin, end] with memory.
+ void SyncRange(int begin, int end);
+
+ // Make this frame so that an arbitrary frame of the same height can
+ // be merged to it. Copies and constants are removed from the
+ // topmost mergable_elements elements of the frame. A
+ // mergable_elements of JumpTarget::kAllElements indicates constants
+  // and copies are removed from the entire frame.
+ void MakeMergable(int mergable_elements);
+
// Prepare this virtual frame for merging to an expected frame by
// performing some state changes that do not require generating
// code. It is guaranteed that no code will be generated.
@@ -130,13 +173,23 @@ class VirtualFrame : public Malloced {
// tells the register allocator that it is free to use frame-internal
// registers. Used when the code generator's frame is switched from this
// one to NULL by an unconditional jump.
- void DetachFromCodeGenerator();
+ void DetachFromCodeGenerator() {
+ RegisterAllocator* cgen_allocator = cgen()->allocator();
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ if (is_used(i)) cgen_allocator->Unuse(i);
+ }
+ }
// (Re)attach a frame to its code generator. This informs the register
// allocator that the frame-internal register references are active again.
// Used when a code generator's frame is switched from NULL to this one by
// binding a label.
- void AttachToCodeGenerator();
+ void AttachToCodeGenerator() {
+ RegisterAllocator* cgen_allocator = cgen()->allocator();
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ if (is_used(i)) cgen_allocator->Use(i);
+ }
+ }
// Emit code for the physical JS entry and exit frame sequences. After
// calling Enter, the virtual frame is ready for use; and after calling
@@ -151,7 +204,7 @@ class VirtualFrame : public Malloced {
void PrepareForReturn();
// Allocate and initialize the frame-allocated locals.
- void AllocateStackSlots(int count);
+ void AllocateStackSlots();
// An element of the expression stack as an assembly operand.
Operand ElementAt(int index) const {
@@ -164,22 +217,22 @@ class VirtualFrame : public Malloced {
// Set a frame element to a constant. The index is frame-top relative.
void SetElementAt(int index, Handle<Object> value) {
- Result temp(value, cgen_);
+ Result temp(value);
SetElementAt(index, &temp);
}
void PushElementAt(int index) {
- PushFrameSlotAt(elements_.length() - index - 1);
+ PushFrameSlotAt(element_count() - index - 1);
}
void StoreToElementAt(int index) {
- StoreToFrameSlotAt(elements_.length() - index - 1);
+ StoreToFrameSlotAt(element_count() - index - 1);
}
// A frame-allocated local as an assembly operand.
- Operand LocalAt(int index) const {
+ Operand LocalAt(int index) {
ASSERT(0 <= index);
- ASSERT(index < local_count_);
+ ASSERT(index < local_count());
return Operand(ebp, kLocal0Offset - index * kPointerSize);
}
@@ -215,10 +268,10 @@ class VirtualFrame : public Malloced {
void RestoreContextRegister();
// A parameter as an assembly operand.
- Operand ParameterAt(int index) const {
+ Operand ParameterAt(int index) {
ASSERT(-1 <= index); // -1 is the receiver.
- ASSERT(index < parameter_count_);
- return Operand(ebp, (1 + parameter_count_ - index) * kPointerSize);
+ ASSERT(index < parameter_count());
+ return Operand(ebp, (1 + parameter_count() - index) * kPointerSize);
}
// Push a copy of the value of a parameter frame slot on top of the frame.
@@ -240,14 +293,17 @@ class VirtualFrame : public Malloced {
}
// The receiver frame slot.
- Operand Receiver() const { return ParameterAt(-1); }
+ Operand Receiver() { return ParameterAt(-1); }
// Push a try-catch or try-finally handler on top of the virtual frame.
void PushTryHandler(HandlerType type);
// Call stub given the number of arguments it expects on (and
// removes from) the stack.
- Result CallStub(CodeStub* stub, int arg_count);
+ Result CallStub(CodeStub* stub, int arg_count) {
+ PrepareForCall(arg_count, arg_count);
+ return RawCallStub(stub);
+ }
// Call stub that takes a single argument passed in eax. The
// argument is given as a result which does not have to be eax or
@@ -307,7 +363,7 @@ class VirtualFrame : public Malloced {
void Drop() { Drop(1); }
// Duplicate the top element of the frame.
- void Dup() { PushFrameSlotAt(elements_.length() - 1); }
+ void Dup() { PushFrameSlotAt(element_count() - 1); }
// Pop an element from the top of the expression stack. Returns a
// Result, which may be a constant or a register.
@@ -331,7 +387,15 @@ class VirtualFrame : public Malloced {
// Pushing a result invalidates it (its contents become owned by the
// frame).
- void Push(Result* result);
+ void Push(Result* result) {
+ if (result->is_register()) {
+ Push(result->reg(), result->static_type());
+ } else {
+ ASSERT(result->is_constant());
+ Push(result->handle());
+ }
+ result->Unuse();
+ }
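
The ownership comment above is worth spelling out. A constructed illustration, not from the patch, of what invalidation means for callers:

    // Constructed example: Push(Result*) hands the Result's register (or
    // constant) over to the frame and calls Unuse() on the Result.
    Result value = frame()->Pop();   // value may hold a register reference
    frame()->Push(&value);           // the frame now owns that register
    ASSERT(!value.is_valid());       // the Result was invalidated by Push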
// Nip removes zero or more elements from immediately below the top
// of the frame, leaving the previous top-of-frame value on top of
@@ -346,70 +410,69 @@ class VirtualFrame : public Malloced {
static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
static const int kPreallocatedElements = 5 + 8; // 8 expression stack slots.
- CodeGenerator* cgen_;
- MacroAssembler* masm_;
-
- List<FrameElement> elements_;
-
- // The number of frame-allocated locals and parameters respectively.
- int parameter_count_;
- int local_count_;
+ ZoneList<FrameElement> elements_;
// The index of the element that is at the processor's stack pointer
// (the esp register).
int stack_pointer_;
- // The index of the element that is at the processor's frame pointer
- // (the ebp register).
- int frame_pointer_;
-
// The index of the register frame element using each register, or
// kIllegalIndex if a register is not on the frame.
- int register_locations_[kNumRegisters];
+ int register_locations_[RegisterAllocator::kNumRegisters];
+
+ // The number of frame-allocated locals and parameters respectively.
+ int parameter_count() { return cgen()->scope()->num_parameters(); }
+ int local_count() { return cgen()->scope()->num_stack_slots(); }
+
+ // The index of the element that is at the processor's frame pointer
+ // (the ebp register). The parameters, receiver, and return address
+ // are below the frame pointer.
+ int frame_pointer() { return parameter_count() + 2; }
// The index of the first parameter. The receiver lies below the first
// parameter.
- int param0_index() const { return 1; }
+ int param0_index() { return 1; }
- // The index of the context slot in the frame.
- int context_index() const {
- ASSERT(frame_pointer_ != kIllegalIndex);
- return frame_pointer_ + 1;
- }
+ // The index of the context slot in the frame. It is immediately
+ // above the frame pointer.
+ int context_index() { return frame_pointer() + 1; }
- // The index of the function slot in the frame. It lies above the context
- // slot.
- int function_index() const {
- ASSERT(frame_pointer_ != kIllegalIndex);
- return frame_pointer_ + 2;
- }
+ // The index of the function slot in the frame. It is above the frame
+ // pointer and the context slot.
+ int function_index() { return frame_pointer() + 2; }
- // The index of the first local. Between the parameters and the locals
- // lie the return address, the saved frame pointer, the context, and the
- // function.
- int local0_index() const {
- ASSERT(frame_pointer_ != kIllegalIndex);
- return frame_pointer_ + 3;
- }
+ // The index of the first local. Between the frame pointer and the
+ // locals lie the context and the function.
+ int local0_index() { return frame_pointer() + 3; }
// The index of the base of the expression stack.
- int expression_base_index() const { return local0_index() + local_count_; }
+ int expression_base_index() { return local0_index() + local_count(); }
// Convert a frame index into a frame pointer relative offset into the
// actual stack.
- int fp_relative(int index) const {
- return (frame_pointer_ - index) * kPointerSize;
+ int fp_relative(int index) {
+ ASSERT(index < element_count());
+ ASSERT(frame_pointer() < element_count()); // FP is on the frame.
+ return (frame_pointer() - index) * kPointerSize;
}
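
Because parameter_count() and local_count() now come straight from the scope, the entire layout is derivable instead of stored. As a worked example, not part of the patch, a function with 2 parameters and 3 stack-allocated locals lays out as:

    index 0       receiver
    index 1..2    parameters           (param0_index() == 1)
    index 3       return address
    index 4       saved frame pointer  (frame_pointer() == 2 + 2 == 4)
    index 5       context              (context_index() == 5)
    index 6       function             (function_index() == 6)
    index 7..9    locals               (local0_index() == 7)
    index 10..    expression stack     (expression_base_index() == 10)

As a cross-check, fp_relative(1) is (4 - 1) * kPointerSize, which matches ParameterAt(0)'s offset of (1 + 2 - 0) * kPointerSize.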
// Record an occurrence of a register in the virtual frame. This has the
// effect of incrementing the register's external reference count and
// of updating the index of the register's location in the frame.
- void Use(Register reg, int index);
+ void Use(Register reg, int index) {
+ ASSERT(!is_used(reg));
+ set_register_location(reg, index);
+ cgen()->allocator()->Use(reg);
+ }
// Record that a register reference has been dropped from the frame. This
// decrements the register's external reference count and invalidates the
// index of the register's location in the frame.
- void Unuse(Register reg);
+ void Unuse(Register reg) {
+ ASSERT(is_used(reg));
+ set_register_location(reg, kIllegalIndex);
+ cgen()->allocator()->Unuse(reg);
+ }
// Spill the element at a particular index---write it to memory if
// necessary, free any associated register, and forget its value if
@@ -421,9 +484,6 @@ class VirtualFrame : public Malloced {
// Keep the element type as register or constant, and clear the dirty bit.
void SyncElementAt(int index);
- // Sync the range of elements in [begin, end).
- void SyncRange(int begin, int end);
-
// Sync a single unsynced element that lies beneath or at the stack pointer.
void SyncElementBelowStackPointer(int index);
@@ -485,9 +545,12 @@ class VirtualFrame : public Malloced {
bool Equals(VirtualFrame* other);
+ // Classes that need raw access to the elements_ array.
+ friend class DeferredCode;
friend class JumpTarget;
};
+
} } // namespace v8::internal
#endif // V8_IA32_VIRTUAL_FRAME_IA32_H_
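
The SpilledScope rewritten above is now a plain RAII guard with no constructor argument. A constructed usage sketch; the function name is made up:

    // Constructed example: any stretch of generated code that must run with
    // every frame element in memory just opens a scope.  The constructor
    // spills the frame; the destructor restores in_spilled_code.
    void CodeGenerator::GenerateSomethingSpilled() {
      VirtualFrame::SpilledScope spilled_scope;
      // All elements are synced to memory here, so raw stack traffic is safe.
      frame()->EmitPush(eax);
      frame()->EmitPop(ebx);
    }  // previous spilled-code state restored here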
diff --git a/deps/v8/src/ic-inl.h b/deps/v8/src/ic-inl.h
index bb5696281..08304d83e 100644
--- a/deps/v8/src/ic-inl.h
+++ b/deps/v8/src/ic-inl.h
@@ -32,7 +32,8 @@
#include "debug.h"
#include "macro-assembler.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
Address IC::address() {
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index ccdf3cab4..657614a39 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -35,13 +35,13 @@
#include "runtime.h"
#include "stub-cache.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
#ifdef DEBUG
static char TransitionMarkFromState(IC::State state) {
switch (state) {
case UNINITIALIZED: return '0';
- case UNINITIALIZED_IN_LOOP: return 'L';
case PREMONOMORPHIC: return 'P';
case MONOMORPHIC: return '1';
case MONOMORPHIC_PROTOTYPE_FAILURE: return '^';
@@ -60,12 +60,14 @@ static char TransitionMarkFromState(IC::State state) {
void IC::TraceIC(const char* type,
Handle<String> name,
State old_state,
- Code* new_target) {
+ Code* new_target,
+ const char* extra_info) {
if (FLAG_trace_ic) {
State new_state = StateFrom(new_target, Heap::undefined_value());
- PrintF("[%s (%c->%c) ", type,
+ PrintF("[%s (%c->%c)%s", type,
TransitionMarkFromState(old_state),
- TransitionMarkFromState(new_state));
+ TransitionMarkFromState(new_state),
+ extra_info);
name->Print();
PrintF("]\n");
}
@@ -226,8 +228,10 @@ void IC::Clear(Address address) {
void CallIC::Clear(Address address, Code* target) {
State state = target->ic_state();
- if (state == UNINITIALIZED || state == UNINITIALIZED_IN_LOOP) return;
- Code* code = StubCache::FindCallInitialize(target->arguments_count());
+ InLoopFlag in_loop = target->ic_in_loop();
+ if (state == UNINITIALIZED) return;
+ Code* code =
+ StubCache::FindCallInitialize(target->arguments_count(), in_loop);
SetTargetAtAddress(address, code);
}
@@ -390,21 +394,22 @@ void CallIC::UpdateCaches(LookupResult* lookup,
// Compute the number of arguments.
int argc = target()->arguments_count();
+ InLoopFlag in_loop = target()->ic_in_loop();
Object* code = NULL;
if (state == UNINITIALIZED) {
// This is the first time we execute this inline cache.
// Set the target to the pre monomorphic stub to delay
// setting the monomorphic state.
- code = StubCache::ComputeCallPreMonomorphic(argc);
+ code = StubCache::ComputeCallPreMonomorphic(argc, in_loop);
} else if (state == MONOMORPHIC) {
- code = StubCache::ComputeCallMegamorphic(argc);
+ code = StubCache::ComputeCallMegamorphic(argc, in_loop);
} else {
// Compute monomorphic stub.
switch (lookup->type()) {
case FIELD: {
int index = lookup->GetFieldIndex();
- code = StubCache::ComputeCallField(argc, *name, *object,
+ code = StubCache::ComputeCallField(argc, in_loop, *name, *object,
lookup->holder(), index);
break;
}
@@ -413,7 +418,7 @@ void CallIC::UpdateCaches(LookupResult* lookup,
// call; used for rewriting to monomorphic state and making sure
// that the code stub is in the stub cache.
JSFunction* function = lookup->GetConstantFunction();
- code = StubCache::ComputeCallConstant(argc, *name, *object,
+ code = StubCache::ComputeCallConstant(argc, in_loop, *name, *object,
lookup->holder(), function);
break;
}
@@ -425,7 +430,7 @@ void CallIC::UpdateCaches(LookupResult* lookup,
if (!object->IsJSObject()) return;
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (lookup->holder() != *receiver) return;
- code = StubCache::ComputeCallNormal(argc, *name, *receiver);
+ code = StubCache::ComputeCallNormal(argc, in_loop, *name, *receiver);
break;
}
case INTERCEPTOR: {
@@ -443,14 +448,15 @@ void CallIC::UpdateCaches(LookupResult* lookup,
if (code->IsFailure()) return;
// Patch the call site depending on the state of the cache.
- if (state == UNINITIALIZED || state == UNINITIALIZED_IN_LOOP ||
- state == PREMONOMORPHIC || state == MONOMORPHIC ||
+ if (state == UNINITIALIZED ||
+ state == PREMONOMORPHIC ||
+ state == MONOMORPHIC ||
state == MONOMORPHIC_PROTOTYPE_FAILURE) {
set_target(Code::cast(code));
}
#ifdef DEBUG
- TraceIC("CallIC", name, state, target());
+ TraceIC("CallIC", name, state, target(), in_loop ? " (in-loop)" : "");
#endif
}
@@ -1088,14 +1094,27 @@ Object* CallIC_Miss(Arguments args) {
IC::State state = IC::StateFrom(ic.target(), args[0]);
Object* result =
ic.LoadFunction(state, args.at<Object>(0), args.at<String>(1));
- if (state != UNINITIALIZED_IN_LOOP || !result->IsJSFunction())
+
+ // The first time the inline cache is updated may be the first time the
+ // function it references gets called. If the function was lazily compiled
+ // then the first call will trigger a compilation. We check for this case
+ // and we do the compilation immediately, instead of waiting for the stub
+ // currently attached to the JSFunction object to trigger compilation. We
+ // do this in the case where we know that the inline cache is inside a loop,
+ // because then we know that we want to optimize the function.
+ if (!result->IsJSFunction() || JSFunction::cast(result)->is_compiled()) {
return result;
+ }
- // Compile the function with the knowledge that it's called from
- // within a loop. This enables further optimization of the function.
+ // Compile now with optimization.
HandleScope scope;
Handle<JSFunction> function = Handle<JSFunction>(JSFunction::cast(result));
- if (!function->is_compiled()) CompileLazyInLoop(function, CLEAR_EXCEPTION);
+ InLoopFlag in_loop = ic.target()->ic_in_loop();
+ if (in_loop == IN_LOOP) {
+ CompileLazyInLoop(function, CLEAR_EXCEPTION);
+ } else {
+ CompileLazy(function, CLEAR_EXCEPTION);
+ }
return *function;
}
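
The common thread in these hunks: whether a call site sits in a loop is no longer a separate IC state (UNINITIALIZED_IN_LOOP is gone) but a flag carried by the generated Code object. A condensed restatement, using only calls that appear above:

    // The flag rides on the code object, so any site that used to
    // special-case the *_IN_LOOP state now just queries it.
    InLoopFlag in_loop = target()->ic_in_loop();
    Code* stub = StubCache::FindCallInitialize(target()->arguments_count(),
                                               in_loop);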
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h
index 11fd60454..bd94fd89e 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -30,7 +30,8 @@
#include "assembler.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// IC_UTIL_LIST defines all utility functions called from generated
// inline caching code. The argument for the macro, ICU, is the function name.
@@ -120,7 +121,8 @@ class IC {
static void TraceIC(const char* type,
Handle<String> name,
State old_state,
- Code* new_target);
+ Code* new_target,
+ const char* extra_info = "");
#endif
static Failure* TypeError(const char* type,
diff --git a/deps/v8/src/interpreter-irregexp.cc b/deps/v8/src/interpreter-irregexp.cc
index 77bcc9074..355fae467 100644
--- a/deps/v8/src/interpreter-irregexp.cc
+++ b/deps/v8/src/interpreter-irregexp.cc
@@ -36,7 +36,8 @@
#include "interpreter-irregexp.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
static unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize;
diff --git a/deps/v8/src/interpreter-irregexp.h b/deps/v8/src/interpreter-irregexp.h
index c65cb9ecd..0ad8846d7 100644
--- a/deps/v8/src/interpreter-irregexp.h
+++ b/deps/v8/src/interpreter-irregexp.h
@@ -30,7 +30,8 @@
#ifndef V8_INTERPRETER_IRREGEXP_H_
#define V8_INTERPRETER_IRREGEXP_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
class IrregexpInterpreter {
diff --git a/deps/v8/src/json-delay.js b/deps/v8/src/json-delay.js
index 90150c6f1..1a6f0085c 100644
--- a/deps/v8/src/json-delay.js
+++ b/deps/v8/src/json-delay.js
@@ -29,7 +29,7 @@ var $JSON = global.JSON;
function ParseJSONUnfiltered(text) {
var s = $String(text);
- var f = %CompileString("(" + text + ")", -1, true);
+ var f = %CompileString("(" + text + ")", true);
return f();
}
diff --git a/deps/v8/src/jsregexp-inl.h b/deps/v8/src/jsregexp-inl.h
index 09c4c8f91..cc90bd172 100644
--- a/deps/v8/src/jsregexp-inl.h
+++ b/deps/v8/src/jsregexp-inl.h
@@ -33,7 +33,8 @@
#include "regexp-macro-assembler.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
template <typename C>
diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc
index ebaefc0d7..6fce1f5c9 100644
--- a/deps/v8/src/jsregexp.cc
+++ b/deps/v8/src/jsregexp.cc
@@ -28,6 +28,7 @@
#include "v8.h"
#include "ast.h"
+#include "compiler.h"
#include "execution.h"
#include "factory.h"
#include "jsregexp-inl.h"
@@ -42,7 +43,7 @@
#include "regexp-macro-assembler-irregexp.h"
#include "regexp-stack.h"
-#ifdef V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32
#include "ia32/macro-assembler-ia32.h"
#include "ia32/regexp-macro-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
@@ -55,7 +56,8 @@
#include "interpreter-irregexp.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
Handle<Object> RegExpImpl::CreateRegExpLiteral(Handle<JSFunction> constructor,
@@ -153,7 +155,7 @@ Handle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
return re;
}
FlattenString(pattern);
- ZoneScope zone_scope(DELETE_ON_EXIT);
+ CompilationZoneScope zone_scope(DELETE_ON_EXIT);
RegExpCompileData parse_result;
FlatStringReader reader(pattern);
if (!ParseRegExp(&reader, flags.is_multiline(), &parse_result)) {
@@ -285,7 +287,7 @@ bool RegExpImpl::EnsureCompiledIrregexp(Handle<JSRegExp> re, bool is_ascii) {
}
// Compile the RegExp.
- ZoneScope zone_scope(DELETE_ON_EXIT);
+ CompilationZoneScope zone_scope(DELETE_ON_EXIT);
JSRegExp::Flags flags = re->GetFlags();
@@ -4187,6 +4189,11 @@ OutSet* DispatchTable::Get(uc16 value) {
void Analysis::EnsureAnalyzed(RegExpNode* that) {
+ StackLimitCheck check;
+ if (check.HasOverflowed()) {
+ fail("Stack overflow");
+ return;
+ }
if (that->info()->been_analyzed || that->info()->being_analyzed)
return;
that->info()->being_analyzed = true;
@@ -4224,16 +4231,20 @@ void Analysis::VisitText(TextNode* that) {
that->MakeCaseIndependent();
}
EnsureAnalyzed(that->on_success());
- that->CalculateOffsets();
+ if (!has_failed()) {
+ that->CalculateOffsets();
+ }
}
void Analysis::VisitAction(ActionNode* that) {
RegExpNode* target = that->on_success();
EnsureAnalyzed(target);
- // If the next node is interested in what it follows then this node
- // has to be interested too so it can pass the information on.
- that->info()->AddFromFollowing(target->info());
+ if (!has_failed()) {
+ // If the next node is interested in what it follows then this node
+ // has to be interested too so it can pass the information on.
+ that->info()->AddFromFollowing(target->info());
+ }
}
@@ -4242,6 +4253,7 @@ void Analysis::VisitChoice(ChoiceNode* that) {
for (int i = 0; i < that->alternatives()->length(); i++) {
RegExpNode* node = that->alternatives()->at(i).node();
EnsureAnalyzed(node);
+ if (has_failed()) return;
// Anything the following nodes need to know has to be known by
// this node also, so it can pass it on.
info->AddFromFollowing(node->info());
@@ -4255,13 +4267,16 @@ void Analysis::VisitLoopChoice(LoopChoiceNode* that) {
RegExpNode* node = that->alternatives()->at(i).node();
if (node != that->loop_node()) {
EnsureAnalyzed(node);
+ if (has_failed()) return;
info->AddFromFollowing(node->info());
}
}
// Check the loop last since it may need the value of this node
// to get a correct result.
EnsureAnalyzed(that->loop_node());
- info->AddFromFollowing(that->loop_node()->info());
+ if (!has_failed()) {
+ info->AddFromFollowing(that->loop_node()->info());
+ }
}
@@ -4433,6 +4448,10 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(RegExpCompileData* data,
data->node = node;
Analysis analysis(ignore_case);
analysis.EnsureAnalyzed(node);
+ if (analysis.has_failed()) {
+ const char* error_message = analysis.error_message();
+ return CompilationResult(error_message);
+ }
NodeInfo info = *node->info();
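
All of the Analysis changes follow a single error-propagation pattern: check the stack limit on entry to EnsureAnalyzed, and after every recursive call bail out before consuming the child's results. Schematically, where VisitSomeNode is a placeholder rather than a real visitor:

    void Analysis::VisitSomeNode(SomeNode* that) {  // placeholder visitor
      EnsureAnalyzed(that->on_success());
      if (has_failed()) return;  // a "Stack overflow" failure unwinds cleanly
      // Only consume the child's info once analysis is known to have
      // succeeded.
      that->info()->AddFromFollowing(that->on_success()->info());
    }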
diff --git a/deps/v8/src/jsregexp.h b/deps/v8/src/jsregexp.h
index 9fa0ecef0..a86f7e648 100644
--- a/deps/v8/src/jsregexp.h
+++ b/deps/v8/src/jsregexp.h
@@ -28,7 +28,8 @@
#ifndef V8_JSREGEXP_H_
#define V8_JSREGEXP_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
class RegExpMacroAssembler;
@@ -1309,7 +1310,7 @@ FOR_EACH_NODE_TYPE(DECLARE_VISIT)
class Analysis: public NodeVisitor {
public:
explicit Analysis(bool ignore_case)
- : ignore_case_(ignore_case) { }
+ : ignore_case_(ignore_case), error_message_(NULL) { }
void EnsureAnalyzed(RegExpNode* node);
#define DECLARE_VISIT(Type) \
@@ -1318,8 +1319,17 @@ FOR_EACH_NODE_TYPE(DECLARE_VISIT)
#undef DECLARE_VISIT
virtual void VisitLoopChoice(LoopChoiceNode* that);
+ bool has_failed() { return error_message_ != NULL; }
+ const char* error_message() {
+ ASSERT(error_message_ != NULL);
+ return error_message_;
+ }
+ void fail(const char* error_message) {
+ error_message_ = error_message;
+ }
private:
bool ignore_case_;
+ const char* error_message_;
DISALLOW_IMPLICIT_CONSTRUCTORS(Analysis);
};
diff --git a/deps/v8/src/jump-target-inl.h b/deps/v8/src/jump-target-inl.h
new file mode 100644
index 000000000..1f0676df0
--- /dev/null
+++ b/deps/v8/src/jump-target-inl.h
@@ -0,0 +1,49 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_JUMP_TARGET_INL_H_
+#define V8_JUMP_TARGET_INL_H_
+
+namespace v8 {
+namespace internal {
+
+CodeGenerator* JumpTarget::cgen() {
+ return CodeGeneratorScope::Current();
+}
+
+void JumpTarget::InitializeEntryElement(int index, FrameElement* target) {
+ entry_frame_->elements_[index].clear_copied();
+ if (target->is_register()) {
+ entry_frame_->set_register_location(target->reg(), index);
+ } else if (target->is_copy()) {
+ entry_frame_->elements_[target->index()].set_copied();
+ }
+}
+
+} } // namespace v8::internal
+
+#endif // V8_JUMP_TARGET_INL_H_
diff --git a/deps/v8/src/jump-target.cc b/deps/v8/src/jump-target.cc
index 6e41270ee..a8eda6bd9 100644
--- a/deps/v8/src/jump-target.cc
+++ b/deps/v8/src/jump-target.cc
@@ -28,115 +28,23 @@
#include "v8.h"
#include "codegen-inl.h"
+#include "jump-target-inl.h"
#include "register-allocator-inl.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// -------------------------------------------------------------------------
// JumpTarget implementation.
-JumpTarget::JumpTarget(CodeGenerator* cgen, Directionality direction)
- : cgen_(cgen),
- direction_(direction),
- reaching_frames_(0),
- merge_labels_(0),
- entry_frame_(NULL),
- is_bound_(false),
- is_linked_(false) {
- ASSERT(cgen != NULL);
- masm_ = cgen->masm();
-}
-
-
-JumpTarget::JumpTarget()
- : cgen_(NULL),
- masm_(NULL),
- direction_(FORWARD_ONLY),
- reaching_frames_(0),
- merge_labels_(0),
- entry_frame_(NULL),
- is_bound_(false),
- is_linked_(false) {
-}
-
-
-void JumpTarget::Initialize(CodeGenerator* cgen, Directionality direction) {
- ASSERT(cgen != NULL);
- ASSERT(cgen_ == NULL);
- cgen_ = cgen;
- masm_ = cgen->masm();
- direction_ = direction;
-}
+bool JumpTarget::compiling_deferred_code_ = false;
void JumpTarget::Unuse() {
- // We should not deallocate jump targets that have unresolved jumps
- // to them. In the event of a compile-time stack overflow or an
- // uninitialized jump target, we don't care.
- ASSERT(!is_linked() || cgen_ == NULL || cgen_->HasStackOverflow());
- for (int i = 0; i < reaching_frames_.length(); i++) {
- delete reaching_frames_[i];
- }
- delete entry_frame_;
- Reset();
-}
-
-
-void JumpTarget::Reset() {
reaching_frames_.Clear();
merge_labels_.Clear();
entry_frame_ = NULL;
entry_label_.Unuse();
- is_bound_ = false;
- is_linked_ = false;
-}
-
-
-FrameElement* JumpTarget::Combine(FrameElement* left, FrameElement* right) {
- // Given a pair of non-null frame element pointers, return one of
- // them as an entry frame candidate or null if they are
- // incompatible.
-
- // If either is invalid, the result is.
- if (!left->is_valid()) return left;
- if (!right->is_valid()) return right;
-
- // If they have the same value, the result is the same. If either
- // is unsynced, the result is.
-
- if (left->is_memory() && right->is_memory()) return left;
-
- if (left->is_register() && right->is_register() &&
- left->reg().is(right->reg())) {
- if (!left->is_synced()) {
- return left;
- } else {
- return right;
- }
- }
-
- if (left->is_constant() &&
- right->is_constant() &&
- left->handle().is_identical_to(right->handle())) {
- if (!left->is_synced()) {
- return left;
- } else {
- return right;
- }
- }
-
- if (left->is_copy() &&
- right->is_copy() &&
- left->index() == right->index()) {
- if (!left->is_synced()) {
- return left;
- } else {
- return right;
- }
- }
-
- // Otherwise they are incompatible and we will reallocate them.
- return NULL;
}
@@ -145,13 +53,29 @@ void JumpTarget::ComputeEntryFrame(int mergable_elements) {
// the directionality of the block. Compute: an entry frame for the
// block.
+ Counters::compute_entry_frame.Increment();
+#ifdef DEBUG
+ if (compiling_deferred_code_) {
+ ASSERT(reaching_frames_.length() > 1);
+ VirtualFrame* frame = reaching_frames_[0];
+ bool all_identical = true;
+ for (int i = 1; i < reaching_frames_.length(); i++) {
+ if (!frame->Equals(reaching_frames_[i])) {
+ all_identical = false;
+ break;
+ }
+ }
+ ASSERT(!all_identical || all_identical);
+ }
+#endif
+
// Choose an initial frame.
VirtualFrame* initial_frame = reaching_frames_[0];
// A list of pointers to frame elements in the entry frame. NULL
// indicates that the element has not yet been determined.
- int length = initial_frame->elements_.length();
- List<FrameElement*> elements(length);
+ int length = initial_frame->element_count();
+ ZoneList<FrameElement*> elements(length);
// Convert the number of mergable elements (counted from the top
// down) to a frame high-water mark (counted from the bottom up).
@@ -165,63 +89,59 @@ void JumpTarget::ComputeEntryFrame(int mergable_elements) {
// frame.
for (int i = 0; i < length; i++) {
FrameElement element = initial_frame->elements_[i];
- // We do not allow copies or constants in bidirectional frames.
- if (direction_ == BIDIRECTIONAL && i > high_water_mark &&
- (element.is_constant() || element.is_copy())) {
- elements.Add(NULL);
- } else {
- elements.Add(&initial_frame->elements_[i]);
+ // We do not allow copies or constants in bidirectional frames. All
+ // elements above the water mark on bidirectional frames have
+ // unknown static types.
+ if (direction_ == BIDIRECTIONAL && i > high_water_mark) {
+ if (element.is_constant() || element.is_copy()) {
+ elements.Add(NULL);
+ continue;
+ }
+ // It's safe to change the static type on the initial frame
+ // element, see comment in JumpTarget::Combine.
+ initial_frame->elements_[i].set_static_type(StaticType::unknown());
}
+ elements.Add(&initial_frame->elements_[i]);
}
// Compute elements based on the other reaching frames.
if (reaching_frames_.length() > 1) {
for (int i = 0; i < length; i++) {
+ FrameElement* element = elements[i];
for (int j = 1; j < reaching_frames_.length(); j++) {
- FrameElement* element = elements[i];
-
// Element computation is monotonic: new information will not
// change our decision about undetermined or invalid elements.
if (element == NULL || !element->is_valid()) break;
- elements[i] = Combine(element, &reaching_frames_[j]->elements_[i]);
+ element = element->Combine(&reaching_frames_[j]->elements_[i]);
}
+ elements[i] = element;
}
}
// Build the new frame. A freshly allocated frame has memory elements
// for the parameters and some platform-dependent elements (e.g.,
// return address). Replace those first.
- entry_frame_ = new VirtualFrame(cgen_);
+ entry_frame_ = new VirtualFrame();
int index = 0;
- for (; index < entry_frame_->elements_.length(); index++) {
+ for (; index < entry_frame_->element_count(); index++) {
+ FrameElement* target = elements[index];
// If the element is determined, set it now. Count registers. Mark
// elements as copied exactly when they have a copy. Undetermined
// elements are initially recorded as if in memory.
- if (elements[index] != NULL) {
- entry_frame_->elements_[index] = *elements[index];
- entry_frame_->elements_[index].clear_copied();
- if (elements[index]->is_register()) {
- entry_frame_->register_locations_[elements[index]->reg().code()] =
- index;
- } else if (elements[index]->is_copy()) {
- entry_frame_->elements_[elements[index]->index()].set_copied();
- }
+ if (target != NULL) {
+ entry_frame_->elements_[index] = *target;
+ InitializeEntryElement(index, target);
}
}
// Then fill in the rest of the frame with new elements.
for (; index < length; index++) {
- if (elements[index] == NULL) {
+ FrameElement* target = elements[index];
+ if (target == NULL) {
entry_frame_->elements_.Add(FrameElement::MemoryElement());
} else {
- entry_frame_->elements_.Add(*elements[index]);
- entry_frame_->elements_[index].clear_copied();
- if (elements[index]->is_register()) {
- entry_frame_->register_locations_[elements[index]->reg().code()] =
- index;
- } else if (elements[index]->is_copy()) {
- entry_frame_->elements_[elements[index]->index()].set_copied();
- }
+ entry_frame_->elements_.Add(*target);
+ InitializeEntryElement(index, target);
}
}
@@ -229,82 +149,74 @@ void JumpTarget::ComputeEntryFrame(int mergable_elements) {
// memory, from the top down.
for (int i = length - 1; i >= 0; i--) {
if (elements[i] == NULL) {
- // If the value is synced on all frames, put it in memory. This
- // costs nothing at the merge code but will incur a
- // memory-to-register move when the value is needed later.
+ // Loop over all the reaching frames to check whether the element
+ // is synced on all frames, to count the registers it occupies,
+ // and to compute a merged static type.
bool is_synced = true;
- for (int j = 0; is_synced && j < reaching_frames_.length(); j++) {
- is_synced = reaching_frames_[j]->elements_[i].is_synced();
- }
-
- // There is nothing to be done if the elements are all synced.
- // It is already recorded as a memory element.
- if (is_synced) continue;
-
- // Choose an available register. Prefer ones that the element
- // is already occupying on some reaching frame.
RegisterFile candidate_registers;
- int max_count = kMinInt;
- int best_reg_code = no_reg.code_;
+ int best_count = kMinInt;
+ int best_reg_num = RegisterAllocator::kInvalidRegister;
+
+ StaticType type; // Initially invalid.
+ if (direction_ != BIDIRECTIONAL || i < high_water_mark) {
+ type = reaching_frames_[0]->elements_[i].static_type();
+ }
for (int j = 0; j < reaching_frames_.length(); j++) {
FrameElement element = reaching_frames_[j]->elements_[i];
- if (element.is_register() &&
- !entry_frame_->is_used(element.reg())) {
- candidate_registers.Use(element.reg());
- if (candidate_registers.count(element.reg()) > max_count) {
- max_count = candidate_registers.count(element.reg());
- best_reg_code = element.reg().code();
+ is_synced = is_synced && element.is_synced();
+ if (element.is_register() && !entry_frame_->is_used(element.reg())) {
+ // Count the register occurrence and remember it if better
+ // than the previous best.
+ int num = RegisterAllocator::ToNumber(element.reg());
+ candidate_registers.Use(num);
+ if (candidate_registers.count(num) > best_count) {
+ best_count = candidate_registers.count(num);
+ best_reg_num = num;
}
}
+ type = type.merge(element.static_type());
}
- // If there was no preferred choice consider any free register.
- if (best_reg_code == no_reg.code_) {
- for (int j = 0; j < kNumRegisters; j++) {
- if (!entry_frame_->is_used(j) && !RegisterAllocator::IsReserved(j)) {
- best_reg_code = j;
+
+ // If the value is synced on all frames, put it in memory. This
+ // costs nothing at the merge code but will incur a
+ // memory-to-register move when the value is needed later.
+ if (is_synced) {
+ // Already recorded as a memory element.
+ entry_frame_->elements_[i].set_static_type(type);
+ continue;
+ }
+
+ // Try to put it in a register. If there was no best choice
+ // consider any free register.
+ if (best_reg_num == RegisterAllocator::kInvalidRegister) {
+ for (int j = 0; j < RegisterAllocator::kNumRegisters; j++) {
+ if (!entry_frame_->is_used(j)) {
+ best_reg_num = j;
break;
}
}
}
- if (best_reg_code != no_reg.code_) {
+ if (best_reg_num == RegisterAllocator::kInvalidRegister) {
+ // If there was no register found, the element is already
+ // recorded as in memory.
+ entry_frame_->elements_[i].set_static_type(type);
+ } else {
// If there was a register choice, use it. Preserve the copied
- // flag on the element.
+ // flag on the element. Set the static type as computed.
bool is_copied = entry_frame_->elements_[i].is_copied();
- Register reg = { best_reg_code };
+ Register reg = RegisterAllocator::ToRegister(best_reg_num);
entry_frame_->elements_[i] =
FrameElement::RegisterElement(reg,
FrameElement::NOT_SYNCED);
if (is_copied) entry_frame_->elements_[i].set_copied();
- entry_frame_->register_locations_[best_reg_code] = i;
+ entry_frame_->elements_[i].set_static_type(type);
+ entry_frame_->set_register_location(reg, i);
}
- // If there was no register found, the element is already
- // recorded as in memory.
}
}
- // Set the static type of frame elements.
- for (int i = 0; i < length; i++) {
- FrameElement* current = &entry_frame_->elements_[i];
- if (direction_ == BIDIRECTIONAL && i >= high_water_mark) {
- current->set_static_type(StaticType::unknown());
- } else {
- StaticType merged_type = reaching_frames_[0]->elements_[i].static_type();
- for (int j = 1, n = reaching_frames_.length();
- !merged_type.is_unknown() && j < n;
- j++) {
- merged_type =
- merged_type.merge(reaching_frames_[j]->elements_[i].static_type());
- }
- current->set_static_type(merged_type);
- }
- }
-
- // Fill in the other fields of the entry frame.
- entry_frame_->local_count_ = initial_frame->local_count_;
- entry_frame_->frame_pointer_ = initial_frame->frame_pointer_;
-
// The stack pointer is at the highest synced element or the base of
// the expression stack.
int stack_pointer = length - 1;
@@ -322,31 +234,28 @@ void JumpTarget::Jump() {
void JumpTarget::Jump(Result* arg) {
- ASSERT(cgen_ != NULL);
- ASSERT(cgen_->has_valid_frame());
+ ASSERT(cgen()->has_valid_frame());
- cgen_->frame()->Push(arg);
+ cgen()->frame()->Push(arg);
DoJump();
}
void JumpTarget::Jump(Result* arg0, Result* arg1) {
- ASSERT(cgen_ != NULL);
- ASSERT(cgen_->has_valid_frame());
+ ASSERT(cgen()->has_valid_frame());
- cgen_->frame()->Push(arg0);
- cgen_->frame()->Push(arg1);
+ cgen()->frame()->Push(arg0);
+ cgen()->frame()->Push(arg1);
DoJump();
}
void JumpTarget::Jump(Result* arg0, Result* arg1, Result* arg2) {
- ASSERT(cgen_ != NULL);
- ASSERT(cgen_->has_valid_frame());
+ ASSERT(cgen()->has_valid_frame());
- cgen_->frame()->Push(arg0);
- cgen_->frame()->Push(arg1);
- cgen_->frame()->Push(arg2);
+ cgen()->frame()->Push(arg0);
+ cgen()->frame()->Push(arg1);
+ cgen()->frame()->Push(arg2);
DoJump();
}
@@ -372,35 +281,33 @@ void JumpTarget::Branch(Condition cc, Hint hint) {
#endif
void JumpTarget::Branch(Condition cc, Result* arg, Hint hint) {
- ASSERT(cgen_ != NULL);
- ASSERT(cgen_->has_valid_frame());
+ ASSERT(cgen()->has_valid_frame());
// We want to check that non-frame registers at the call site stay in
// the same registers on the fall-through branch.
DECLARE_ARGCHECK_VARS(arg);
- cgen_->frame()->Push(arg);
+ cgen()->frame()->Push(arg);
DoBranch(cc, hint);
- *arg = cgen_->frame()->Pop();
+ *arg = cgen()->frame()->Pop();
ASSERT_ARGCHECK(arg);
}
void JumpTarget::Branch(Condition cc, Result* arg0, Result* arg1, Hint hint) {
- ASSERT(cgen_ != NULL);
- ASSERT(cgen_->frame() != NULL);
+ ASSERT(cgen()->frame() != NULL);
// We want to check that non-frame registers at the call site stay in
// the same registers on the fall-through branch.
DECLARE_ARGCHECK_VARS(arg0);
DECLARE_ARGCHECK_VARS(arg1);
- cgen_->frame()->Push(arg0);
- cgen_->frame()->Push(arg1);
+ cgen()->frame()->Push(arg0);
+ cgen()->frame()->Push(arg1);
DoBranch(cc, hint);
- *arg1 = cgen_->frame()->Pop();
- *arg0 = cgen_->frame()->Pop();
+ *arg1 = cgen()->frame()->Pop();
+ *arg0 = cgen()->frame()->Pop();
ASSERT_ARGCHECK(arg0);
ASSERT_ARGCHECK(arg1);
@@ -412,8 +319,7 @@ void JumpTarget::Branch(Condition cc,
Result* arg1,
Result* arg2,
Hint hint) {
- ASSERT(cgen_ != NULL);
- ASSERT(cgen_->frame() != NULL);
+ ASSERT(cgen()->frame() != NULL);
// We want to check that non-frame registers at the call site stay in
// the same registers on the fall-through branch.
@@ -421,13 +327,13 @@ void JumpTarget::Branch(Condition cc,
DECLARE_ARGCHECK_VARS(arg1);
DECLARE_ARGCHECK_VARS(arg2);
- cgen_->frame()->Push(arg0);
- cgen_->frame()->Push(arg1);
- cgen_->frame()->Push(arg2);
+ cgen()->frame()->Push(arg0);
+ cgen()->frame()->Push(arg1);
+ cgen()->frame()->Push(arg2);
DoBranch(cc, hint);
- *arg2 = cgen_->frame()->Pop();
- *arg1 = cgen_->frame()->Pop();
- *arg0 = cgen_->frame()->Pop();
+ *arg2 = cgen()->frame()->Pop();
+ *arg1 = cgen()->frame()->Pop();
+ *arg0 = cgen()->frame()->Pop();
ASSERT_ARGCHECK(arg0);
ASSERT_ARGCHECK(arg1);
@@ -441,8 +347,7 @@ void JumpTarget::Branch(Condition cc,
Result* arg2,
Result* arg3,
Hint hint) {
- ASSERT(cgen_ != NULL);
- ASSERT(cgen_->frame() != NULL);
+ ASSERT(cgen()->frame() != NULL);
// We want to check that non-frame registers at the call site stay in
// the same registers on the fall-through branch.
@@ -451,15 +356,15 @@ void JumpTarget::Branch(Condition cc,
DECLARE_ARGCHECK_VARS(arg2);
DECLARE_ARGCHECK_VARS(arg3);
- cgen_->frame()->Push(arg0);
- cgen_->frame()->Push(arg1);
- cgen_->frame()->Push(arg2);
- cgen_->frame()->Push(arg3);
+ cgen()->frame()->Push(arg0);
+ cgen()->frame()->Push(arg1);
+ cgen()->frame()->Push(arg2);
+ cgen()->frame()->Push(arg3);
DoBranch(cc, hint);
- *arg3 = cgen_->frame()->Pop();
- *arg2 = cgen_->frame()->Pop();
- *arg1 = cgen_->frame()->Pop();
- *arg0 = cgen_->frame()->Pop();
+ *arg3 = cgen()->frame()->Pop();
+ *arg2 = cgen()->frame()->Pop();
+ *arg1 = cgen()->frame()->Pop();
+ *arg0 = cgen()->frame()->Pop();
ASSERT_ARGCHECK(arg0);
ASSERT_ARGCHECK(arg1);
@@ -469,15 +374,14 @@ void JumpTarget::Branch(Condition cc,
void BreakTarget::Branch(Condition cc, Result* arg, Hint hint) {
- ASSERT(cgen_ != NULL);
- ASSERT(cgen_->has_valid_frame());
+ ASSERT(cgen()->has_valid_frame());
- int count = cgen_->frame()->height() - expected_height_;
+ int count = cgen()->frame()->height() - expected_height_;
if (count > 0) {
// We negate and branch here rather than using DoBranch's negate
// and branch. This gives us a hook to remove statement state
// from the frame.
- JumpTarget fall_through(cgen_);
+ JumpTarget fall_through;
// Branch to fall through will not negate, because it is a
// forward-only target.
fall_through.Branch(NegateCondition(cc), NegateHint(hint));
@@ -485,9 +389,9 @@ void BreakTarget::Branch(Condition cc, Result* arg, Hint hint) {
fall_through.Bind();
} else {
DECLARE_ARGCHECK_VARS(arg);
- cgen_->frame()->Push(arg);
+ cgen()->frame()->Push(arg);
DoBranch(cc, hint);
- *arg = cgen_->frame()->Pop();
+ *arg = cgen()->frame()->Pop();
ASSERT_ARGCHECK(arg);
}
}
@@ -502,26 +406,22 @@ void JumpTarget::Bind(int mergable_elements) {
void JumpTarget::Bind(Result* arg, int mergable_elements) {
- ASSERT(cgen_ != NULL);
-
- if (cgen_->has_valid_frame()) {
- cgen_->frame()->Push(arg);
+ if (cgen()->has_valid_frame()) {
+ cgen()->frame()->Push(arg);
}
DoBind(mergable_elements);
- *arg = cgen_->frame()->Pop();
+ *arg = cgen()->frame()->Pop();
}
void JumpTarget::Bind(Result* arg0, Result* arg1, int mergable_elements) {
- ASSERT(cgen_ != NULL);
-
- if (cgen_->has_valid_frame()) {
- cgen_->frame()->Push(arg0);
- cgen_->frame()->Push(arg1);
+ if (cgen()->has_valid_frame()) {
+ cgen()->frame()->Push(arg0);
+ cgen()->frame()->Push(arg1);
}
DoBind(mergable_elements);
- *arg1 = cgen_->frame()->Pop();
- *arg0 = cgen_->frame()->Pop();
+ *arg1 = cgen()->frame()->Pop();
+ *arg0 = cgen()->frame()->Pop();
}
@@ -529,17 +429,15 @@ void JumpTarget::Bind(Result* arg0,
Result* arg1,
Result* arg2,
int mergable_elements) {
- ASSERT(cgen_ != NULL);
-
- if (cgen_->has_valid_frame()) {
- cgen_->frame()->Push(arg0);
- cgen_->frame()->Push(arg1);
- cgen_->frame()->Push(arg2);
+ if (cgen()->has_valid_frame()) {
+ cgen()->frame()->Push(arg0);
+ cgen()->frame()->Push(arg1);
+ cgen()->frame()->Push(arg2);
}
DoBind(mergable_elements);
- *arg2 = cgen_->frame()->Pop();
- *arg1 = cgen_->frame()->Pop();
- *arg0 = cgen_->frame()->Pop();
+ *arg2 = cgen()->frame()->Pop();
+ *arg1 = cgen()->frame()->Pop();
+ *arg0 = cgen()->frame()->Pop();
}
@@ -548,24 +446,23 @@ void JumpTarget::Bind(Result* arg0,
Result* arg2,
Result* arg3,
int mergable_elements) {
- ASSERT(cgen_ != NULL);
-
- if (cgen_->has_valid_frame()) {
- cgen_->frame()->Push(arg0);
- cgen_->frame()->Push(arg1);
- cgen_->frame()->Push(arg2);
- cgen_->frame()->Push(arg3);
+ if (cgen()->has_valid_frame()) {
+ cgen()->frame()->Push(arg0);
+ cgen()->frame()->Push(arg1);
+ cgen()->frame()->Push(arg2);
+ cgen()->frame()->Push(arg3);
}
DoBind(mergable_elements);
- *arg3 = cgen_->frame()->Pop();
- *arg2 = cgen_->frame()->Pop();
- *arg1 = cgen_->frame()->Pop();
- *arg0 = cgen_->frame()->Pop();
+ *arg3 = cgen()->frame()->Pop();
+ *arg2 = cgen()->frame()->Pop();
+ *arg1 = cgen()->frame()->Pop();
+ *arg0 = cgen()->frame()->Pop();
}
void JumpTarget::AddReachingFrame(VirtualFrame* frame) {
ASSERT(reaching_frames_.length() == merge_labels_.length());
+ ASSERT(entry_frame_ == NULL);
Label fresh;
merge_labels_.Add(fresh);
reaching_frames_.Add(frame);
@@ -575,64 +472,54 @@ void JumpTarget::AddReachingFrame(VirtualFrame* frame) {
// -------------------------------------------------------------------------
// BreakTarget implementation.
-void BreakTarget::Initialize(CodeGenerator* cgen, Directionality direction) {
- JumpTarget::Initialize(cgen, direction);
- ASSERT(cgen_->has_valid_frame());
- expected_height_ = cgen_->frame()->height();
+void BreakTarget::set_direction(Directionality direction) {
+ JumpTarget::set_direction(direction);
+ ASSERT(cgen()->has_valid_frame());
+ expected_height_ = cgen()->frame()->height();
}
void BreakTarget::CopyTo(BreakTarget* destination) {
ASSERT(destination != NULL);
- destination->cgen_ = cgen_;
- destination->masm_ = masm_;
destination->direction_ = direction_;
- destination->reaching_frames_.Clear();
- destination->merge_labels_.Clear();
- ASSERT(reaching_frames_.length() == merge_labels_.length());
- for (int i = 0; i < reaching_frames_.length(); i++) {
- destination->reaching_frames_.Add(reaching_frames_[i]);
- destination->merge_labels_.Add(merge_labels_[i]);
- }
+ destination->reaching_frames_.Rewind(0);
+ destination->reaching_frames_.AddAll(reaching_frames_);
+ destination->merge_labels_.Rewind(0);
+ destination->merge_labels_.AddAll(merge_labels_);
destination->entry_frame_ = entry_frame_;
destination->entry_label_ = entry_label_;
- destination->is_bound_ = is_bound_;
- destination->is_linked_ = is_linked_;
destination->expected_height_ = expected_height_;
}
void BreakTarget::Jump() {
- ASSERT(cgen_ != NULL);
- ASSERT(cgen_->has_valid_frame());
+ ASSERT(cgen()->has_valid_frame());
// Drop leftover statement state from the frame before merging.
- cgen_->frame()->ForgetElements(cgen_->frame()->height() - expected_height_);
+ cgen()->frame()->ForgetElements(cgen()->frame()->height() - expected_height_);
DoJump();
}
void BreakTarget::Jump(Result* arg) {
- ASSERT(cgen_ != NULL);
- ASSERT(cgen_->has_valid_frame());
+ ASSERT(cgen()->has_valid_frame());
// Drop leftover statement state from the frame before merging.
- cgen_->frame()->ForgetElements(cgen_->frame()->height() - expected_height_);
- cgen_->frame()->Push(arg);
+ cgen()->frame()->ForgetElements(cgen()->frame()->height() - expected_height_);
+ cgen()->frame()->Push(arg);
DoJump();
}
void BreakTarget::Branch(Condition cc, Hint hint) {
- ASSERT(cgen_ != NULL);
- ASSERT(cgen_->has_valid_frame());
+ ASSERT(cgen()->has_valid_frame());
- int count = cgen_->frame()->height() - expected_height_;
+ int count = cgen()->frame()->height() - expected_height_;
if (count > 0) {
// We negate and branch here rather than using DoBranch's negate
// and branch. This gives us a hook to remove statement state
// from the frame.
- JumpTarget fall_through(cgen_);
+ JumpTarget fall_through;
// Branch to fall through will not negate, because it is a
// forward-only target.
fall_through.Branch(NegateCondition(cc), NegateHint(hint));
@@ -646,7 +533,6 @@ void BreakTarget::Branch(Condition cc, Hint hint) {
void BreakTarget::Bind(int mergable_elements) {
#ifdef DEBUG
- ASSERT(cgen_ != NULL);
// All the forward-reaching frames should have been adjusted at the
// jumps to this target.
for (int i = 0; i < reaching_frames_.length(); i++) {
@@ -657,8 +543,9 @@ void BreakTarget::Bind(int mergable_elements) {
// Drop leftover statement state from the frame before merging, even
// on the fall through. This is so we can bind the return target
// with state on the frame.
- if (cgen_->has_valid_frame()) {
- cgen_->frame()->ForgetElements(cgen_->frame()->height() - expected_height_);
+ if (cgen()->has_valid_frame()) {
+ int count = cgen()->frame()->height() - expected_height_;
+ cgen()->frame()->ForgetElements(count);
}
DoBind(mergable_elements);
}
@@ -666,7 +553,6 @@ void BreakTarget::Bind(int mergable_elements) {
void BreakTarget::Bind(Result* arg, int mergable_elements) {
#ifdef DEBUG
- ASSERT(cgen_ != NULL);
// All the forward-reaching frames should have been adjusted at the
// jumps to this target.
for (int i = 0; i < reaching_frames_.length(); i++) {
@@ -677,12 +563,13 @@ void BreakTarget::Bind(Result* arg, int mergable_elements) {
// Drop leftover statement state from the frame before merging, even
// on the fall through. This is so we can bind the return target
// with state on the frame.
- if (cgen_->has_valid_frame()) {
- cgen_->frame()->ForgetElements(cgen_->frame()->height() - expected_height_);
- cgen_->frame()->Push(arg);
+ if (cgen()->has_valid_frame()) {
+ int count = cgen()->frame()->height() - expected_height_;
+ cgen()->frame()->ForgetElements(count);
+ cgen()->frame()->Push(arg);
}
DoBind(mergable_elements);
- *arg = cgen_->frame()->Pop();
+ *arg = cgen()->frame()->Pop();
}
@@ -699,36 +586,23 @@ ShadowTarget::ShadowTarget(BreakTarget* shadowed) {
 // While shadowing, this shadow target saves the state of the original.
shadowed->CopyTo(this);
- // The original's state is reset. We do not Unuse it because that
- // would delete the expected frame and assert that the target is not
- // linked.
- shadowed->Reset();
- ASSERT(cgen_ != NULL);
- ASSERT(cgen_->has_valid_frame());
- shadowed->set_expected_height(cgen_->frame()->height());
-
- // Setting the code generator to null prevents the shadow target from
- // being used until shadowing stops.
- cgen_ = NULL;
- masm_ = NULL;
+ // The original's state is reset.
+ shadowed->Unuse();
+ ASSERT(cgen()->has_valid_frame());
+ shadowed->set_expected_height(cgen()->frame()->height());
}
void ShadowTarget::StopShadowing() {
ASSERT(is_shadowing_);
- // This target does not have a valid code generator yet.
- cgen_ = other_target_->code_generator();
- ASSERT(cgen_ != NULL);
- masm_ = cgen_->masm();
-
// The states of this target, which was shadowed, and the original
// target, which was shadowing, are swapped.
BreakTarget temp;
other_target_->CopyTo(&temp);
CopyTo(other_target_);
temp.CopyTo(this);
- temp.Reset(); // So the destructor does not deallocate virtual frames.
+ temp.Unuse();
#ifdef DEBUG
is_shadowing_ = false;
diff --git a/deps/v8/src/jump-target.h b/deps/v8/src/jump-target.h
index 1cfbe29dc..7585fafbf 100644
--- a/deps/v8/src/jump-target.h
+++ b/deps/v8/src/jump-target.h
@@ -28,14 +28,14 @@
#ifndef V8_JUMP_TARGET_H_
#define V8_JUMP_TARGET_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Forward declarations.
class FrameElement;
class Result;
class VirtualFrame;
-
// -------------------------------------------------------------------------
// Jump targets
//
@@ -52,43 +52,39 @@ class VirtualFrame;
// In particular, this means that at least one of the control-flow
// graph edges reaching the target must be a forward edge.
-class JumpTarget : public Malloced { // Shadows are dynamically allocated.
+class JumpTarget : public ZoneObject { // Shadows are dynamically allocated.
public:
// Forward-only jump targets can only be reached by forward CFG edges.
enum Directionality { FORWARD_ONLY, BIDIRECTIONAL };
- // Construct a jump target with a given code generator used to generate
- // code and to provide access to a current frame.
- explicit JumpTarget(CodeGenerator* cgen,
- Directionality direction = FORWARD_ONLY);
-
- // Construct a jump target without a code generator. A code
- // generator must be supplied before using the jump target as a
- // label. This is useful, eg, when break targets are embedded in
- // AST nodes.
- JumpTarget();
-
- // Supply a code generator and directionality to an already
- // constructed jump target. This function expects to be given a
- // non-null code generator, and to be called only when the code
- // generator is not yet set.
- virtual void Initialize(CodeGenerator* cgen,
- Directionality direction = FORWARD_ONLY);
-
- virtual ~JumpTarget() { Unuse(); }
-
- // Treat the jump target as a fresh one. The state is reset and
- // pointed-to virtual frames are deallocated. There should be no
- // dangling jumps to the target.
- void Unuse();
+ // Construct a jump target used to generate code and to provide
+ // access to a current frame.
+ explicit JumpTarget(Directionality direction)
+ : direction_(direction),
+ reaching_frames_(0),
+ merge_labels_(0),
+ entry_frame_(NULL) {
+ }
+
+ // Construct a jump target.
+ JumpTarget()
+ : direction_(FORWARD_ONLY),
+ reaching_frames_(0),
+ merge_labels_(0),
+ entry_frame_(NULL) {
+ }
- // Reset the internal state of this jump target. Pointed-to virtual
- // frames are not deallocated and dangling jumps to the target are
- // left dangling.
- void Reset();
+ virtual ~JumpTarget() {}
- // Accessors.
- CodeGenerator* code_generator() const { return cgen_; }
+ // Set the direction of the jump target.
+ virtual void set_direction(Directionality direction) {
+ direction_ = direction;
+ }
+
+ // Treat the jump target as a fresh one. The state is reset.
+ void Unuse();
+
+ inline CodeGenerator* cgen();
Label* entry_label() { return &entry_label_; }
@@ -98,9 +94,14 @@ class JumpTarget : public Malloced { // Shadows are dynamically allocated.
}
// Predicates testing the state of the encapsulated label.
- bool is_bound() const { return is_bound_; }
- bool is_linked() const { return is_linked_; }
- bool is_unused() const { return !is_bound() && !is_linked(); }
+ bool is_bound() const { return entry_label_.is_bound(); }
+ bool is_linked() const {
+ return !is_bound() && !reaching_frames_.is_empty();
+ }
+ bool is_unused() const {
+ // This is !is_bound() && !is_linked().
+ return !is_bound() && reaching_frames_.is_empty();
+ }
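+
+  // Typical state progression, as a sketch (the member calls shown are
+  // the ones declared below):
+  //
+  //   JumpTarget t;   // is_unused(): no bound label, no reaching frames
+  //   t.Jump();       // is_linked(): a reaching frame has been recorded
+  //   t.Bind();       // is_bound(): entry_label_ is bound, frames merged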
// Emit a jump to the target. There must be a current frame at the
// jump and there will be no current frame after the jump.
@@ -161,21 +162,19 @@ class JumpTarget : public Malloced { // Shadows are dynamically allocated.
static const int kAllElements = -1; // Not a valid number of elements.
- protected:
- // The code generator gives access to its current frame.
- CodeGenerator* cgen_;
-
- // Used to emit code.
- MacroAssembler* masm_;
+ static void set_compiling_deferred_code(bool flag) {
+ compiling_deferred_code_ = flag;
+ }
+ protected:
// Directionality flag set at initialization time.
Directionality direction_;
// A list of frames reaching this block via forward jumps.
- List<VirtualFrame*> reaching_frames_;
+ ZoneList<VirtualFrame*> reaching_frames_;
// A parallel list of labels for merge code.
- List<Label> merge_labels_;
+ ZoneList<Label> merge_labels_;
// The frame used on entry to the block and expected at backward
// jumps to the block. Set when the jump target is bound, but may
@@ -185,12 +184,6 @@ class JumpTarget : public Malloced { // Shadows are dynamically allocated.
// The actual entry label of the block.
Label entry_label_;
- // A target is bound if its Bind member function has been called.
- // It is linked if it is not bound but its Jump, Branch, or Call
- // member functions have been called.
- bool is_bound_;
- bool is_linked_;
-
// Implementations of Jump, Branch, and Bind with all arguments and
// return values using the virtual frame.
void DoJump();
@@ -198,13 +191,16 @@ class JumpTarget : public Malloced { // Shadows are dynamically allocated.
void DoBind(int mergable_elements);
private:
- // Add a virtual frame reaching this labeled block via a forward
- // jump, and a fresh label for its merge code.
+ static bool compiling_deferred_code_;
+
+ // Add a virtual frame reaching this labeled block via a forward jump,
+ // and a corresponding merge code label.
void AddReachingFrame(VirtualFrame* frame);
- // Choose an element from a pair of frame elements to be in the
- // expected frame. Return null if they are incompatible.
- FrameElement* Combine(FrameElement* left, FrameElement* right);
+ // Perform initialization required during entry frame computation
+ // after setting the virtual frame element at 'index' in 'frame' to
+ // be 'target'.
+ inline void InitializeEntryElement(int index, FrameElement* target);
// Compute a frame to use for entry to this block. Mergable
// elements is as described for the Bind function.
@@ -226,18 +222,13 @@ class JumpTarget : public Malloced { // Shadows are dynamically allocated.
class BreakTarget : public JumpTarget {
public:
- // Construct a break target without a code generator. A code
- // generator must be supplied before using the break target as a
- // label. This is useful, eg, when break targets are embedded in AST
- // nodes.
+ // Construct a break target.
BreakTarget() {}
- // Supply a code generator, expected expression stack height, and
- // directionality to an already constructed break target. This
- // function expects to be given a non-null code generator, and to be
- // called only when the code generator is not yet set.
- virtual void Initialize(CodeGenerator* cgen,
- Directionality direction = FORWARD_ONLY);
+ virtual ~BreakTarget() {}
+
+ // Set the direction of the break target.
+ virtual void set_direction(Directionality direction);
// Copy the state of this break target to the destination. The
// lists of forward-reaching frames and merge-point labels are
@@ -294,9 +285,7 @@ class ShadowTarget : public BreakTarget {
// flow intended for the shadowed one.
explicit ShadowTarget(BreakTarget* shadowed);
- virtual ~ShadowTarget() {
- ASSERT(!is_shadowing_);
- }
+ virtual ~ShadowTarget() {}
// End shadowing. After shadowing ends, the original jump target
// again gives access to the formerly shadowed target and the shadow
diff --git a/deps/v8/src/list-inl.h b/deps/v8/src/list-inl.h
index e3d251fba..e41db11fc 100644
--- a/deps/v8/src/list-inl.h
+++ b/deps/v8/src/list-inl.h
@@ -30,7 +30,8 @@
#include "list.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
template<typename T, class P>
@@ -43,6 +44,17 @@ void List<T, P>::Add(const T& element) {
}
+template<typename T, class P>
+void List<T, P>::AddAll(const List<T, P>& other) {
+ int result_length = length_ + other.length_;
+ if (capacity_ < result_length) Resize(result_length);
+ for (int i = 0; i < other.length_; i++) {
+ data_[length_ + i] = other.data_[i];
+ }
+ length_ = result_length;
+}
+
+
// Use two layers of inlining so that the non-inlined function can
// use the same implementation as the inlined version.
template<typename T, class P>
@@ -57,11 +69,18 @@ void List<T, P>::ResizeAddInternal(const T& element) {
// Grow the list capacity by 50%, but make sure to let it grow
// even when the capacity is zero (possible initial case).
int new_capacity = 1 + capacity_ + (capacity_ >> 1);
+ // Since the element reference could be an element of the list, copy
+ // it out of the old backing storage before resizing.
+ T temp = element;
+ Resize(new_capacity);
+ data_[length_++] = temp;
+}
+
+
+template<typename T, class P>
+void List<T, P>::Resize(int new_capacity) {
T* new_data = List<T, P>::NewData(new_capacity);
memcpy(new_data, data_, capacity_ * sizeof(T));
- // Since the element reference could be an element of the list,
- // assign it to the new backing store before deleting the old.
- new_data[length_++] = element;
List<T, P>::DeleteData(data_);
data_ = new_data;
capacity_ = new_capacity;
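// A minimal usage sketch of the new AddAll (element type and values are
// illustrative):
//
//   List<int> a(2);
//   a.Add(1);
//   a.Add(2);
//   List<int> b(1);
//   b.AddAll(a);   // resizes once to hold both elements, then copies them
//   // b.length() == 2; a is unchanged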
diff --git a/deps/v8/src/list.h b/deps/v8/src/list.h
index 92d23ea39..b6c06d846 100644
--- a/deps/v8/src/list.h
+++ b/deps/v8/src/list.h
@@ -28,7 +28,8 @@
#ifndef V8_LIST_H_
#define V8_LIST_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// ----------------------------------------------------------------------------
@@ -77,6 +78,9 @@ class List {
// expanding the list if necessary.
void Add(const T& element);
+ // Add all the elements from the argument list to this list.
+ void AddAll(const List<T, P>& other);
+
 // Adds 'count' elements with the value 'value' and returns a
// vector that allows access to the elements. The vector is valid
// until the next change is made to this list.
@@ -126,6 +130,9 @@ class List {
// non-inlined versions of ResizeAdd.
void ResizeAddInternal(const T& element);
+ // Resize the list.
+ void Resize(int new_capacity);
+
DISALLOW_COPY_AND_ASSIGN(List);
};
diff --git a/deps/v8/src/log-utils.cc b/deps/v8/src/log-utils.cc
new file mode 100644
index 000000000..43610497e
--- /dev/null
+++ b/deps/v8/src/log-utils.cc
@@ -0,0 +1,302 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "log-utils.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+LogDynamicBuffer::LogDynamicBuffer(
+ int block_size, int max_size, const char* seal, int seal_size)
+ : block_size_(block_size),
+ max_size_(max_size - (max_size % block_size_)),
+ seal_(seal),
+ seal_size_(seal_size),
+ blocks_(max_size_ / block_size_ + 1),
+ write_pos_(0), block_index_(0), block_write_pos_(0), is_sealed_(false) {
+ ASSERT(BlocksCount() > 0);
+ AllocateBlock(0);
+ for (int i = 1; i < BlocksCount(); ++i) {
+ blocks_[i] = NULL;
+ }
+}
+
+
+LogDynamicBuffer::~LogDynamicBuffer() {
+ for (int i = 0; i < BlocksCount(); ++i) {
+ DeleteArray(blocks_[i]);
+ }
+}
+
+
+int LogDynamicBuffer::Read(int from_pos, char* dest_buf, int buf_size) {
+ if (buf_size == 0) return 0;
+ int read_pos = from_pos;
+ int block_read_index = BlockIndex(from_pos);
+ int block_read_pos = PosInBlock(from_pos);
+ int dest_buf_pos = 0;
+ // Read until dest_buf is filled, or write_pos_ is reached.
+ while (read_pos < write_pos_ && dest_buf_pos < buf_size) {
+ const int read_size = Min(write_pos_ - read_pos,
+ Min(buf_size - dest_buf_pos, block_size_ - block_read_pos));
+ memcpy(dest_buf + dest_buf_pos,
+ blocks_[block_read_index] + block_read_pos, read_size);
+ block_read_pos += read_size;
+ dest_buf_pos += read_size;
+ read_pos += read_size;
+ if (block_read_pos == block_size_) {
+ block_read_pos = 0;
+ ++block_read_index;
+ }
+ }
+ return dest_buf_pos;
+}
+
+
+int LogDynamicBuffer::Seal() {
+ WriteInternal(seal_, seal_size_);
+ is_sealed_ = true;
+ return 0;
+}
+
+
+int LogDynamicBuffer::Write(const char* data, int data_size) {
+ if (is_sealed_) {
+ return 0;
+ }
+ if ((write_pos_ + data_size) <= (max_size_ - seal_size_)) {
+ return WriteInternal(data, data_size);
+ } else {
+ return Seal();
+ }
+}
+
+
+int LogDynamicBuffer::WriteInternal(const char* data, int data_size) {
+ int data_pos = 0;
+ while (data_pos < data_size) {
+ const int write_size =
+ Min(data_size - data_pos, block_size_ - block_write_pos_);
+ memcpy(blocks_[block_index_] + block_write_pos_, data + data_pos,
+ write_size);
+ block_write_pos_ += write_size;
+ data_pos += write_size;
+ if (block_write_pos_ == block_size_) {
+ block_write_pos_ = 0;
+ AllocateBlock(++block_index_);
+ }
+ }
+ write_pos_ += data_size;
+ return data_size;
+}
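+
+
+// A minimal sketch of the write/seal behavior (the sizes and messages here
+// are illustrative, not the production constants):
+//
+//   LogDynamicBuffer buf(16, 64, "SEAL\n", 5);
+//   buf.Write("0123456789abcdef", 16);  // 0 + 16 <= 64 - 5: written; a
+//                                       // second block is allocated
+//   buf.Write(big_msg, 60);             // 16 + 60 > 59: Seal() writes
+//                                       // "SEAL\n" instead, returns 0
+//   buf.Write("more", 4);               // buffer is sealed, returns 0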
+
+
+bool Log::is_stopped_ = false;
+Log::WritePtr Log::Write = NULL;
+FILE* Log::output_handle_ = NULL;
+LogDynamicBuffer* Log::output_buffer_ = NULL;
+// Must be the same message as in Logger::PauseProfiler.
+const char* Log::kDynamicBufferSeal = "profiler,\"pause\"\n";
+Mutex* Log::mutex_ = NULL;
+char* Log::message_buffer_ = NULL;
+
+
+void Log::Init() {
+ mutex_ = OS::CreateMutex();
+ message_buffer_ = NewArray<char>(kMessageBufferSize);
+}
+
+
+void Log::OpenStdout() {
+ ASSERT(!IsEnabled());
+ output_handle_ = stdout;
+ Write = WriteToFile;
+ Init();
+}
+
+
+void Log::OpenFile(const char* name) {
+ ASSERT(!IsEnabled());
+ output_handle_ = OS::FOpen(name, OS::LogFileOpenMode);
+ Write = WriteToFile;
+ Init();
+}
+
+
+void Log::OpenMemoryBuffer() {
+ ASSERT(!IsEnabled());
+ output_buffer_ = new LogDynamicBuffer(
+ kDynamicBufferBlockSize, kMaxDynamicBufferSize,
+ kDynamicBufferSeal, strlen(kDynamicBufferSeal));
+ Write = WriteToMemory;
+ Init();
+}
+
+
+void Log::Close() {
+ if (Write == WriteToFile) {
+ fclose(output_handle_);
+ output_handle_ = NULL;
+ } else if (Write == WriteToMemory) {
+ delete output_buffer_;
+ output_buffer_ = NULL;
+ } else {
+ ASSERT(Write == NULL);
+ }
+ Write = NULL;
+
+ delete mutex_;
+ mutex_ = NULL;
+
+ is_stopped_ = false;
+}
+
+
+int Log::GetLogLines(int from_pos, char* dest_buf, int max_size) {
+ if (Write != WriteToMemory) return 0;
+ ASSERT(output_buffer_ != NULL);
+ ASSERT(from_pos >= 0);
+ ASSERT(max_size >= 0);
+ int actual_size = output_buffer_->Read(from_pos, dest_buf, max_size);
+ ASSERT(actual_size <= max_size);
+ if (actual_size == 0) return 0;
+
+ // Find previous log line boundary.
+ char* end_pos = dest_buf + actual_size - 1;
+ while (end_pos >= dest_buf && *end_pos != '\n') --end_pos;
+ actual_size = end_pos - dest_buf + 1;
+ ASSERT(actual_size <= max_size);
+ return actual_size;
+}
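+
+
+// Sketch of the line-boundary trimming above (the buffer contents are
+// illustrative): if the memory buffer holds "a,1\nb,2\nc,3\n" then
+//
+//   char buf[10];
+//   int n = Log::GetLogLines(0, buf, 10);
+//
+// reads 10 raw bytes but returns n == 8 ("a,1\nb,2\n"), so callers always
+// receive whole log lines.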
+
+
+LogMessageBuilder::WriteFailureHandler
+ LogMessageBuilder::write_failure_handler = NULL;
+
+
+LogMessageBuilder::LogMessageBuilder(): sl(Log::mutex_), pos_(0) {
+ ASSERT(Log::message_buffer_ != NULL);
+}
+
+
+void LogMessageBuilder::Append(const char* format, ...) {
+ Vector<char> buf(Log::message_buffer_ + pos_,
+ Log::kMessageBufferSize - pos_);
+ va_list args;
+ va_start(args, format);
+ Append(format, args);
+ va_end(args);
+ ASSERT(pos_ <= Log::kMessageBufferSize);
+}
+
+
+void LogMessageBuilder::Append(const char* format, va_list args) {
+ Vector<char> buf(Log::message_buffer_ + pos_,
+ Log::kMessageBufferSize - pos_);
+ int result = v8::internal::OS::VSNPrintF(buf, format, args);
+
+ // Result is -1 if output was truncated.
+ if (result >= 0) {
+ pos_ += result;
+ } else {
+ pos_ = Log::kMessageBufferSize;
+ }
+ ASSERT(pos_ <= Log::kMessageBufferSize);
+}
+
+
+void LogMessageBuilder::Append(const char c) {
+ if (pos_ < Log::kMessageBufferSize) {
+ Log::message_buffer_[pos_++] = c;
+ }
+ ASSERT(pos_ <= Log::kMessageBufferSize);
+}
+
+
+void LogMessageBuilder::Append(String* str) {
+ AssertNoAllocation no_heap_allocation; // Ensure string stays valid.
+ int length = str->length();
+ for (int i = 0; i < length; i++) {
+ Append(static_cast<char>(str->Get(i)));
+ }
+}
+
+
+void LogMessageBuilder::AppendDetailed(String* str, bool show_impl_info) {
+ AssertNoAllocation no_heap_allocation; // Ensure string stays valid.
+ int len = str->length();
+ if (len > 0x1000)
+ len = 0x1000;
+ if (show_impl_info) {
+ Append(str->IsAsciiRepresentation() ? 'a' : '2');
+ if (StringShape(str).IsExternal())
+ Append('e');
+ if (StringShape(str).IsSymbol())
+ Append('#');
+ Append(":%i:", str->length());
+ }
+ for (int i = 0; i < len; i++) {
+ uc32 c = str->Get(i);
+ if (c > 0xff) {
+ Append("\\u%04x", c);
+ } else if (c < 32 || c > 126) {
+ Append("\\x%02x", c);
+ } else if (c == ',') {
+ Append("\\,");
+ } else if (c == '\\') {
+ Append("\\\\");
+ } else {
+ Append("%lc", c);
+ }
+ }
+}
+
+
+void LogMessageBuilder::WriteToLogFile() {
+ ASSERT(pos_ <= Log::kMessageBufferSize);
+ const int written = Log::Write(Log::message_buffer_, pos_);
+ if (written != pos_ && write_failure_handler != NULL) {
+ write_failure_handler();
+ }
+}
+
+
+void LogMessageBuilder::WriteCStringToLogFile(const char* str) {
+ const int len = strlen(str);
+ const int written = Log::Write(str, len);
+ if (written != len && write_failure_handler != NULL) {
+ write_failure_handler();
+ }
+}
+
+#endif // ENABLE_LOGGING_AND_PROFILING
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/log-utils.h b/deps/v8/src/log-utils.h
new file mode 100644
index 000000000..2e8b3a364
--- /dev/null
+++ b/deps/v8/src/log-utils.h
@@ -0,0 +1,223 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_LOG_UTILS_H_
+#define V8_LOG_UTILS_H_
+
+namespace v8 {
+namespace internal {
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+// A memory buffer that grows as it is written to. Size is incremented
+// in 'block_size' steps, never exceeding 'max_size'. During growth,
+// memory contents are never copied. At the end of the buffer an amount
+// of memory specified by 'seal_size' is reserved. When the writing
+// position reaches max_size - seal_size, the buffer auto-seals itself
+// with 'seal' and allows no further writes. The data pointed to by
+// 'seal' must be available during the entire LogDynamicBuffer lifetime.
+//
+// An instance of this class is created dynamically by Log.
+class LogDynamicBuffer {
+ public:
+ LogDynamicBuffer(
+ int block_size, int max_size, const char* seal, int seal_size);
+
+ ~LogDynamicBuffer();
+
+ // Reads the contents of the buffer starting from 'from_pos'. Upon
+ // return, 'dest_buf' is filled with the data. The actual amount of
+ // data read is returned; it is at most 'buf_size'.
+ int Read(int from_pos, char* dest_buf, int buf_size);
+
+ // Writes 'data' to the buffer, making it larger if necessary. If the
+ // data is too big to fit in the buffer, it doesn't get written at
+ // all. In that case, the buffer auto-seals itself and stops accepting
+ // any incoming writes. Returns the amount of data written (either
+ // 'data_size', or 0 if 'data' is too big).
+ int Write(const char* data, int data_size);
+
+ private:
+ void AllocateBlock(int index) {
+ blocks_[index] = NewArray<char>(block_size_);
+ }
+
+ int BlockIndex(int pos) const { return pos / block_size_; }
+
+ int BlocksCount() const { return BlockIndex(max_size_) + 1; }
+
+ int PosInBlock(int pos) const { return pos % block_size_; }
+
+ int Seal();
+
+ int WriteInternal(const char* data, int data_size);
+
+ const int block_size_;
+ const int max_size_;
+ const char* seal_;
+ const int seal_size_;
+ ScopedVector<char*> blocks_;
+ int write_pos_;
+ int block_index_;
+ int block_write_pos_;
+ bool is_sealed_;
+};
+
+
+// Functions and data for performing output of log messages.
+class Log : public AllStatic {
+ public:
+ // Opens stdout for logging.
+ static void OpenStdout();
+
+ // Opens file for logging.
+ static void OpenFile(const char* name);
+
+ // Opens memory buffer for logging.
+ static void OpenMemoryBuffer();
+
+ // Disables logging, but preserves acquired resources.
+ static void stop() { is_stopped_ = true; }
+
+ // Frees all resources acquired in Open... functions.
+ static void Close();
+
+ // See description in include/v8.h.
+ static int GetLogLines(int from_pos, char* dest_buf, int max_size);
+
+ // Returns whether logging is enabled.
+ static bool IsEnabled() {
+ return !is_stopped_ && (output_handle_ != NULL || output_buffer_ != NULL);
+ }
+
+ private:
+ typedef int (*WritePtr)(const char* msg, int length);
+
+ // Initialization function called from Open... functions.
+ static void Init();
+
+ // Write functions assume that mutex_ is acquired by the caller.
+ static WritePtr Write;
+
+ // Implementation of writing to a log file.
+ static int WriteToFile(const char* msg, int length) {
+ ASSERT(output_handle_ != NULL);
+ int rv = fwrite(msg, 1, length, output_handle_);
+ ASSERT(length == rv);
+ return rv;
+ }
+
+ // Implementation of writing to a memory buffer.
+ static int WriteToMemory(const char* msg, int length) {
+ ASSERT(output_buffer_ != NULL);
+ return output_buffer_->Write(msg, length);
+ }
+
+ // Whether logging is stopped (e.g. due to insufficient resources).
+ static bool is_stopped_;
+
+ // When logging is active, either output_handle_ or output_buffer_ is used
+ // to store a pointer to the log destination. If logging was opened via OpenStdout
+ // or OpenFile, then output_handle_ is used. If logging was opened
+ // via OpenMemoryBuffer, then output_buffer_ is used.
+ // mutex_ should be acquired before using output_handle_ or output_buffer_.
+ static FILE* output_handle_;
+
+ static LogDynamicBuffer* output_buffer_;
+
+ // Size of dynamic buffer block (and dynamic buffer initial size).
+ static const int kDynamicBufferBlockSize = 65536;
+
+ // Maximum size of dynamic buffer.
+ static const int kMaxDynamicBufferSize = 50 * 1024 * 1024;
+
+ // Message to "seal" dynamic buffer with.
+ static const char* kDynamicBufferSeal;
+
+ // mutex_ is a Mutex used for enforcing exclusive
+ // access to the formatting buffer and the log file or log memory buffer.
+ static Mutex* mutex_;
+
+ // Size of buffer used for formatting log messages.
+ static const int kMessageBufferSize = 2048;
+
+ // Buffer used for formatting log messages. This is a singleton buffer and
+ // mutex_ should be acquired before using it.
+ static char* message_buffer_;
+
+ friend class LogMessageBuilder;
+};
+
+
+// Utility class for formatting log messages. It fills the message into the
+// static buffer in Log.
+class LogMessageBuilder BASE_EMBEDDED {
+ public:
+ // Create a message builder starting from position 0. This acquires the mutex
+ // in the log as well.
+ explicit LogMessageBuilder();
+ ~LogMessageBuilder() { }
+
+ // Append string data to the log message.
+ void Append(const char* format, ...);
+
+ // Append string data to the log message.
+ void Append(const char* format, va_list args);
+
+ // Append a character to the log message.
+ void Append(const char c);
+
+ // Append a heap string.
+ void Append(String* str);
+
+ void AppendDetailed(String* str, bool show_impl_info);
+
+ // Write the log message to the log file currently opened.
+ void WriteToLogFile();
+
+ // Write a null-terminated string to the log file currently opened.
+ void WriteCStringToLogFile(const char* str);
+
+ // A handler that is called when Log::Write fails.
+ typedef void (*WriteFailureHandler)();
+
+ static void set_write_failure_handler(WriteFailureHandler handler) {
+ write_failure_handler = handler;
+ }
+
+ private:
+ static WriteFailureHandler write_failure_handler;
+
+ ScopedLock sl;
+ int pos_;
+};
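+
+
+// A minimal usage sketch (the event name and fields are illustrative):
+//
+//   {
+//     LogMessageBuilder msg;           // acquires Log::mutex_ (ScopedLock)
+//     msg.Append("%s,%d\n", "my-event", 42);
+//     msg.WriteToLogFile();
+//   }                                  // mutex released at end of scope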
+
+#endif // ENABLE_LOGGING_AND_PROFILING
+
+} } // namespace v8::internal
+
+#endif // V8_LOG_UTILS_H_
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index 5297a302d..c1edf4d18 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -31,12 +31,14 @@
#include "bootstrapper.h"
#include "log.h"
+#include "log-utils.h"
#include "macro-assembler.h"
#include "platform.h"
#include "serialize.h"
#include "string-stream.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
#ifdef ENABLE_LOGGING_AND_PROFILING
@@ -144,11 +146,18 @@ void StackTracer::Trace(TickSample* sample) {
return;
}
+ const Address js_entry_sp = Top::js_entry_sp(Top::GetCurrentThread());
+ if (js_entry_sp == 0) {
+ // Not executing JS now.
+ sample->frames_count = 0;
+ return;
+ }
+
SafeStackTraceFrameIterator it(
reinterpret_cast<Address>(sample->fp),
reinterpret_cast<Address>(sample->sp),
reinterpret_cast<Address>(sample->sp),
- reinterpret_cast<Address>(low_stack_bound_));
+ js_entry_sp);
int i = 0;
while (!it.done() && i < TickSample::kMaxFramesCount) {
sample->stack[i++] = it.frame()->pc();
@@ -164,14 +173,13 @@ void StackTracer::Trace(TickSample* sample) {
//
class Ticker: public Sampler {
public:
- explicit Ticker(int interval, uintptr_t low_stack_bound):
- Sampler(interval, FLAG_prof), window_(NULL), profiler_(NULL),
- stack_tracer_(low_stack_bound) {}
+ explicit Ticker(int interval):
+ Sampler(interval, FLAG_prof), window_(NULL), profiler_(NULL) {}
~Ticker() { if (IsActive()) Stop(); }
void Tick(TickSample* sample) {
- if (IsProfiling()) stack_tracer_.Trace(sample);
+ if (IsProfiling()) StackTracer::Trace(sample);
if (profiler_) profiler_->Insert(sample);
if (window_) window_->AddState(sample->state);
}
@@ -188,7 +196,7 @@ class Ticker: public Sampler {
void SetProfiler(Profiler* profiler) {
profiler_ = profiler;
- if (!IsActive()) Start();
+ if (!FLAG_prof_lazy && !IsActive()) Start();
}
void ClearProfiler() {
@@ -199,7 +207,6 @@ class Ticker: public Sampler {
private:
SlidingStateWindow* window_;
Profiler* profiler_;
- StackTracer stack_tracer_;
};
@@ -254,7 +261,7 @@ void Profiler::Engage() {
// Register to get ticks.
Logger::ticker_->SetProfiler(this);
- LOG(UncheckedStringEvent("profiler", "begin"));
+ Logger::ProfilerBeginEvent();
}
@@ -267,6 +274,8 @@ void Profiler::Disengage() {
// the thread to terminate.
running_ = false;
TickSample sample;
+ // Reset 'paused_' flag, otherwise semaphore may not be signalled.
+ resume();
Insert(&sample);
Join();
@@ -284,296 +293,6 @@ void Profiler::Run() {
}
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-// Functions and data for performing output of log messages.
-class Log : public AllStatic {
- public:
- // Opens stdout for logging.
- static void OpenStdout();
-
- // Opens file for logging.
- static void OpenFile(const char* name);
-
- // Opens memory buffer for logging.
- static void OpenMemoryBuffer();
-
- // Frees all resources acquired in Open... functions.
- static void Close();
-
- // See description in v8.h.
- static int GetLogLines(int from_pos, char* dest_buf, int max_size);
-
- static bool is_enabled() { return output_.handle != NULL; }
-
- typedef int (*WritePtr)(const char* msg, int length);
- private:
- static void Init();
-
- // Write functions assume that mutex_ is acquired by the caller.
- static WritePtr Write;
-
- static int WriteToFile(const char* msg, int length) {
- ASSERT(output_.handle != NULL);
- int rv = fwrite(msg, 1, length, output_.handle);
- ASSERT(length == rv);
- return rv;
- }
-
- static int WriteToMemory(const char* msg, int length) {
- ASSERT(output_.buffer != NULL);
- ASSERT(output_buffer_write_pos_ >= output_.buffer);
- if (output_buffer_write_pos_ + length
- <= output_.buffer + kOutputBufferSize) {
- memcpy(output_buffer_write_pos_, msg, length);
- output_buffer_write_pos_ += length;
- return length;
- } else {
- // Memory buffer is full, ignore write.
- return 0;
- }
- }
-
- // When logging is active, output_ refers the file or memory buffer
- // events are written to.
- // mutex_ should be acquired before using output_.
- union Output {
- FILE* handle;
- char* buffer;
- };
- static Output output_;
-
- // mutex_ is a Mutex used for enforcing exclusive
- // access to the formatting buffer and the log file or log memory buffer.
- static Mutex* mutex_;
-
- // Size of buffer used for memory logging.
- static const int kOutputBufferSize = 2 * 1024 * 1024;
-
- // Writing position in a memory buffer.
- static char* output_buffer_write_pos_;
-
- // Size of buffer used for formatting log messages.
- static const int kMessageBufferSize = 2048;
-
- // Buffer used for formatting log messages. This is a singleton buffer and
- // mutex_ should be acquired before using it.
- static char* message_buffer_;
-
- friend class LogMessageBuilder;
-};
-
-
-Log::WritePtr Log::Write = NULL;
-Log::Output Log::output_ = {NULL};
-Mutex* Log::mutex_ = NULL;
-char* Log::output_buffer_write_pos_ = NULL;
-char* Log::message_buffer_ = NULL;
-
-
-void Log::Init() {
- mutex_ = OS::CreateMutex();
- message_buffer_ = NewArray<char>(kMessageBufferSize);
-}
-
-
-void Log::OpenStdout() {
- ASSERT(output_.handle == NULL);
- output_.handle = stdout;
- Write = WriteToFile;
- Init();
-}
-
-
-void Log::OpenFile(const char* name) {
- ASSERT(output_.handle == NULL);
- output_.handle = OS::FOpen(name, OS::LogFileOpenMode);
- Write = WriteToFile;
- Init();
-}
-
-
-void Log::OpenMemoryBuffer() {
- ASSERT(output_.buffer == NULL);
- output_.buffer = NewArray<char>(kOutputBufferSize);
- output_buffer_write_pos_ = output_.buffer;
- Write = WriteToMemory;
- Init();
-}
-
-
-void Log::Close() {
- if (Write == WriteToFile) {
- fclose(output_.handle);
- output_.handle = NULL;
- } else if (Write == WriteToMemory) {
- DeleteArray(output_.buffer);
- output_.buffer = NULL;
- } else {
- ASSERT(Write == NULL);
- }
- Write = NULL;
-
- delete mutex_;
- mutex_ = NULL;
-
- DeleteArray(message_buffer_);
- message_buffer_ = NULL;
-}
-
-
-int Log::GetLogLines(int from_pos, char* dest_buf, int max_size) {
- if (Write != WriteToMemory) return 0;
- ASSERT(output_.buffer != NULL);
- ASSERT(output_buffer_write_pos_ >= output_.buffer);
- ASSERT(from_pos >= 0);
- ASSERT(max_size >= 0);
- int actual_size = max_size;
- char* buffer_read_pos = output_.buffer + from_pos;
- ScopedLock sl(mutex_);
- if (actual_size == 0
- || output_buffer_write_pos_ == output_.buffer
- || buffer_read_pos >= output_buffer_write_pos_) {
- // No data requested or can be returned.
- return 0;
- }
- if (buffer_read_pos + actual_size > output_buffer_write_pos_) {
- // Requested size overlaps with current writing position and
- // needs to be truncated.
- actual_size = output_buffer_write_pos_ - buffer_read_pos;
- ASSERT(actual_size == 0 || buffer_read_pos[actual_size - 1] == '\n');
- } else {
- // Find previous log line boundary.
- char* end_pos = buffer_read_pos + actual_size - 1;
- while (end_pos >= buffer_read_pos && *end_pos != '\n') --end_pos;
- actual_size = end_pos - buffer_read_pos + 1;
- }
- ASSERT(actual_size <= max_size);
- if (actual_size > 0) {
- memcpy(dest_buf, buffer_read_pos, actual_size);
- }
- return actual_size;
-}
-
-
-// Utility class for formatting log messages. It fills the message into the
-// static buffer in Log.
-class LogMessageBuilder BASE_EMBEDDED {
- public:
- explicit LogMessageBuilder();
- ~LogMessageBuilder() { }
-
- void Append(const char* format, ...);
- void Append(const char* format, va_list args);
- void Append(const char c);
- void Append(String *str);
- void AppendDetailed(String* str, bool show_impl_info);
-
- void WriteToLogFile();
- void WriteCStringToLogFile(const char* str);
-
- private:
- ScopedLock sl;
- int pos_;
-};
-
-
-// Create a message builder starting from position 0. This acquires the mutex
-// in the logger as well.
-LogMessageBuilder::LogMessageBuilder(): sl(Log::mutex_), pos_(0) {
- ASSERT(Log::message_buffer_ != NULL);
-}
-
-
-// Append string data to the log message.
-void LogMessageBuilder::Append(const char* format, ...) {
- Vector<char> buf(Log::message_buffer_ + pos_,
- Log::kMessageBufferSize - pos_);
- va_list args;
- va_start(args, format);
- Append(format, args);
- va_end(args);
- ASSERT(pos_ <= Log::kMessageBufferSize);
-}
-
-
-// Append string data to the log message.
-void LogMessageBuilder::Append(const char* format, va_list args) {
- Vector<char> buf(Log::message_buffer_ + pos_,
- Log::kMessageBufferSize - pos_);
- int result = v8::internal::OS::VSNPrintF(buf, format, args);
-
- // Result is -1 if output was truncated.
- if (result >= 0) {
- pos_ += result;
- } else {
- pos_ = Log::kMessageBufferSize;
- }
- ASSERT(pos_ <= Log::kMessageBufferSize);
-}
-
-
-// Append a character to the log message.
-void LogMessageBuilder::Append(const char c) {
- if (pos_ < Log::kMessageBufferSize) {
- Log::message_buffer_[pos_++] = c;
- }
- ASSERT(pos_ <= Log::kMessageBufferSize);
-}
-
-
-// Append a heap string.
-void LogMessageBuilder::Append(String* str) {
- AssertNoAllocation no_heap_allocation; // Ensure string stay valid.
- int length = str->length();
- for (int i = 0; i < length; i++) {
- Append(static_cast<char>(str->Get(i)));
- }
-}
-
-void LogMessageBuilder::AppendDetailed(String* str, bool show_impl_info) {
- AssertNoAllocation no_heap_allocation; // Ensure string stay valid.
- int len = str->length();
- if (len > 0x1000)
- len = 0x1000;
- if (show_impl_info) {
- Append(str->IsAsciiRepresentation() ? 'a' : '2');
- if (StringShape(str).IsExternal())
- Append('e');
- if (StringShape(str).IsSymbol())
- Append('#');
- Append(":%i:", str->length());
- }
- for (int i = 0; i < len; i++) {
- uc32 c = str->Get(i);
- if (c > 0xff) {
- Append("\\u%04x", c);
- } else if (c < 32 || c > 126) {
- Append("\\x%02x", c);
- } else if (c == ',') {
- Append("\\,");
- } else if (c == '\\') {
- Append("\\\\");
- } else {
- Append("%lc", c);
- }
- }
-}
-
-// Write the log message to the log file currently opened.
-void LogMessageBuilder::WriteToLogFile() {
- ASSERT(pos_ <= Log::kMessageBufferSize);
- Log::Write(Log::message_buffer_, pos_);
-}
-
-// Write a null-terminated string to to the log file currently opened.
-void LogMessageBuilder::WriteCStringToLogFile(const char* str) {
- int len = strlen(str);
- Log::Write(str, len);
-}
-#endif
-
-
//
// Logger class implementation.
//
@@ -584,8 +303,16 @@ VMState Logger::bottom_state_(EXTERNAL);
SlidingStateWindow* Logger::sliding_state_window_ = NULL;
-bool Logger::is_enabled() {
- return Log::is_enabled();
+bool Logger::IsEnabled() {
+ return Log::IsEnabled();
+}
+
+
+void Logger::ProfilerBeginEvent() {
+ if (!Log::IsEnabled()) return;
+ LogMessageBuilder msg;
+ msg.Append("profiler,\"begin\",%d\n", kSamplingIntervalMs);
+ msg.WriteToLogFile();
}
#endif // ENABLE_LOGGING_AND_PROFILING
@@ -593,7 +320,7 @@ bool Logger::is_enabled() {
void Logger::Preamble(const char* content) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::is_enabled() || !FLAG_log_code) return;
+ if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
msg.WriteCStringToLogFile(content);
#endif
@@ -609,7 +336,7 @@ void Logger::StringEvent(const char* name, const char* value) {
#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::UncheckedStringEvent(const char* name, const char* value) {
- if (!Log::is_enabled()) return;
+ if (!Log::IsEnabled()) return;
LogMessageBuilder msg;
msg.Append("%s,\"%s\"\n", name, value);
msg.WriteToLogFile();
@@ -619,7 +346,7 @@ void Logger::UncheckedStringEvent(const char* name, const char* value) {
void Logger::IntEvent(const char* name, int value) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::is_enabled() || !FLAG_log) return;
+ if (!Log::IsEnabled() || !FLAG_log) return;
LogMessageBuilder msg;
msg.Append("%s,%d\n", name, value);
msg.WriteToLogFile();
@@ -629,9 +356,9 @@ void Logger::IntEvent(const char* name, int value) {
void Logger::HandleEvent(const char* name, Object** location) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::is_enabled() || !FLAG_log_handles) return;
+ if (!Log::IsEnabled() || !FLAG_log_handles) return;
LogMessageBuilder msg;
- msg.Append("%s,0x%%"V8PRIp"\n", name, location);
+ msg.Append("%s,0x%" V8PRIxPTR "\n", name, location);
msg.WriteToLogFile();
#endif
}
@@ -642,7 +369,7 @@ void Logger::HandleEvent(const char* name, Object** location) {
// caller's responsibility to ensure that log is enabled and that
// FLAG_log_api is true.
void Logger::ApiEvent(const char* format, ...) {
- ASSERT(Log::is_enabled() && FLAG_log_api);
+ ASSERT(Log::IsEnabled() && FLAG_log_api);
LogMessageBuilder msg;
va_list ap;
va_start(ap, format);
@@ -655,7 +382,7 @@ void Logger::ApiEvent(const char* format, ...) {
void Logger::ApiNamedSecurityCheck(Object* key) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::is_enabled() || !FLAG_log_api) return;
+ if (!Log::IsEnabled() || !FLAG_log_api) return;
if (key->IsString()) {
SmartPointer<char> str =
String::cast(key)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
@@ -673,7 +400,7 @@ void Logger::SharedLibraryEvent(const char* library_path,
unsigned start,
unsigned end) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::is_enabled() || !FLAG_prof) return;
+ if (!Log::IsEnabled() || !FLAG_prof) return;
LogMessageBuilder msg;
msg.Append("shared-library,\"%s\",0x%08x,0x%08x\n", library_path,
start, end);
@@ -686,7 +413,7 @@ void Logger::SharedLibraryEvent(const wchar_t* library_path,
unsigned start,
unsigned end) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::is_enabled() || !FLAG_prof) return;
+ if (!Log::IsEnabled() || !FLAG_prof) return;
LogMessageBuilder msg;
msg.Append("shared-library,\"%ls\",0x%08x,0x%08x\n", library_path,
start, end);
@@ -741,7 +468,7 @@ void Logger::LogRegExpSource(Handle<JSRegExp> regexp) {
void Logger::RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::is_enabled() || !FLAG_log_regexp) return;
+ if (!Log::IsEnabled() || !FLAG_log_regexp) return;
LogMessageBuilder msg;
msg.Append("regexp-compile,");
LogRegExpSource(regexp);
@@ -753,7 +480,7 @@ void Logger::RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache) {
void Logger::LogRuntime(Vector<const char> format, JSArray* args) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::is_enabled() || !FLAG_log_runtime) return;
+ if (!Log::IsEnabled() || !FLAG_log_runtime) return;
HandleScope scope;
LogMessageBuilder msg;
for (int i = 0; i < format.length(); i++) {
@@ -794,7 +521,7 @@ void Logger::LogRuntime(Vector<const char> format, JSArray* args) {
void Logger::ApiIndexedSecurityCheck(uint32_t index) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::is_enabled() || !FLAG_log_api) return;
+ if (!Log::IsEnabled() || !FLAG_log_api) return;
ApiEvent("api,check-security,%u\n", index);
#endif
}
@@ -805,7 +532,7 @@ void Logger::ApiNamedPropertyAccess(const char* tag,
Object* name) {
#ifdef ENABLE_LOGGING_AND_PROFILING
ASSERT(name->IsString());
- if (!Log::is_enabled() || !FLAG_log_api) return;
+ if (!Log::IsEnabled() || !FLAG_log_api) return;
String* class_name_obj = holder->class_name();
SmartPointer<char> class_name =
class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
@@ -819,7 +546,7 @@ void Logger::ApiIndexedPropertyAccess(const char* tag,
JSObject* holder,
uint32_t index) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::is_enabled() || !FLAG_log_api) return;
+ if (!Log::IsEnabled() || !FLAG_log_api) return;
String* class_name_obj = holder->class_name();
SmartPointer<char> class_name =
class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
@@ -829,7 +556,7 @@ void Logger::ApiIndexedPropertyAccess(const char* tag,
void Logger::ApiObjectAccess(const char* tag, JSObject* object) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::is_enabled() || !FLAG_log_api) return;
+ if (!Log::IsEnabled() || !FLAG_log_api) return;
String* class_name_obj = object->class_name();
SmartPointer<char> class_name =
class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
@@ -840,7 +567,7 @@ void Logger::ApiObjectAccess(const char* tag, JSObject* object) {
void Logger::ApiEntryCall(const char* name) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::is_enabled() || !FLAG_log_api) return;
+ if (!Log::IsEnabled() || !FLAG_log_api) return;
Logger::ApiEvent("api,%s\n", name);
#endif
}
@@ -848,9 +575,9 @@ void Logger::ApiEntryCall(const char* name) {
void Logger::NewEvent(const char* name, void* object, size_t size) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::is_enabled() || !FLAG_log) return;
+ if (!Log::IsEnabled() || !FLAG_log) return;
LogMessageBuilder msg;
- msg.Append("new,%s,0x%%"V8PRIp",%u\n", name, object,
+ msg.Append("new,%s,0x%" V8PRIxPTR ",%u\n", name, object,
static_cast<unsigned int>(size));
msg.WriteToLogFile();
#endif
@@ -859,9 +586,9 @@ void Logger::NewEvent(const char* name, void* object, size_t size) {
void Logger::DeleteEvent(const char* name, void* object) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::is_enabled() || !FLAG_log) return;
+ if (!Log::IsEnabled() || !FLAG_log) return;
LogMessageBuilder msg;
- msg.Append("delete,%s,0x%%"V8PRIp"\n", name, object);
+ msg.Append("delete,%s,0x%" V8PRIxPTR "\n", name, object);
msg.WriteToLogFile();
#endif
}
@@ -869,9 +596,9 @@ void Logger::DeleteEvent(const char* name, void* object) {
void Logger::CodeCreateEvent(const char* tag, Code* code, const char* comment) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::is_enabled() || !FLAG_log_code) return;
+ if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
- msg.Append("code-creation,%s,0x%"V8PRIp",%d,\"", tag, code->address(),
+ msg.Append("code-creation,%s,0x%" V8PRIxPTR ",%d,\"", tag, code->address(),
code->ExecutableSize());
for (const char* p = comment; *p != '\0'; p++) {
if (*p == '"') {
@@ -888,12 +615,12 @@ void Logger::CodeCreateEvent(const char* tag, Code* code, const char* comment) {
void Logger::CodeCreateEvent(const char* tag, Code* code, String* name) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::is_enabled() || !FLAG_log_code) return;
+ if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
SmartPointer<char> str =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append("code-creation,%s,0x%"V8PRIp",%d,\"%s\"\n", tag, code->address(),
- code->ExecutableSize(), *str);
+ msg.Append("code-creation,%s,0x%" V8PRIxPTR ",%d,\"%s\"\n",
+ tag, code->address(), code->ExecutableSize(), *str);
msg.WriteToLogFile();
#endif
}
@@ -902,13 +629,13 @@ void Logger::CodeCreateEvent(const char* tag, Code* code, String* name) {
void Logger::CodeCreateEvent(const char* tag, Code* code, String* name,
String* source, int line) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::is_enabled() || !FLAG_log_code) return;
+ if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
SmartPointer<char> str =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
SmartPointer<char> sourcestr =
source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append("code-creation,%s,0x%"V8PRIp",%d,\"%s %s:%d\"\n",
+ msg.Append("code-creation,%s,0x%" V8PRIxPTR ",%d,\"%s %s:%d\"\n",
tag, code->address(),
code->ExecutableSize(),
*str, *sourcestr, line);
@@ -919,9 +646,9 @@ void Logger::CodeCreateEvent(const char* tag, Code* code, String* name,
void Logger::CodeCreateEvent(const char* tag, Code* code, int args_count) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::is_enabled() || !FLAG_log_code) return;
+ if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
- msg.Append("code-creation,%s,0x%"V8PRIp",%d,\"args_count: %d\"\n", tag,
+ msg.Append("code-creation,%s,0x%" V8PRIxPTR ",%d,\"args_count: %d\"\n", tag,
code->address(),
code->ExecutableSize(),
args_count);
@@ -932,9 +659,9 @@ void Logger::CodeCreateEvent(const char* tag, Code* code, int args_count) {
void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::is_enabled() || !FLAG_log_code) return;
+ if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
- msg.Append("code-creation,%s,0x%"V8PRIp",%d,\"", "RegExp",
+ msg.Append("code-creation,%s,0x%" V8PRIxPTR ",%d,\"", "RegExp",
code->address(),
code->ExecutableSize());
msg.AppendDetailed(source, false);
@@ -946,9 +673,11 @@ void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
void Logger::CodeAllocateEvent(Code* code, Assembler* assem) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::is_enabled() || !FLAG_log_code) return;
+ if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
- msg.Append("code-allocate,0x%"V8PRIp",0x%"V8PRIp"\n", code->address(), assem);
+ msg.Append("code-allocate,0x%" V8PRIxPTR ",0x%" V8PRIxPTR "\n",
+ code->address(),
+ assem);
msg.WriteToLogFile();
#endif
}
@@ -956,9 +685,9 @@ void Logger::CodeAllocateEvent(Code* code, Assembler* assem) {
void Logger::CodeMoveEvent(Address from, Address to) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::is_enabled() || !FLAG_log_code) return;
+ if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
- msg.Append("code-move,0x%"V8PRIp",0x%"V8PRIp"\n", from, to);
+ msg.Append("code-move,0x%" V8PRIxPTR ",0x%" V8PRIxPTR "\n", from, to);
msg.WriteToLogFile();
#endif
}
@@ -966,9 +695,9 @@ void Logger::CodeMoveEvent(Address from, Address to) {
void Logger::CodeDeleteEvent(Address from) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::is_enabled() || !FLAG_log_code) return;
+ if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
- msg.Append("code-delete,0x%"V8PRIp"\n", from);
+ msg.Append("code-delete,0x%" V8PRIxPTR "\n", from);
msg.WriteToLogFile();
#endif
}
@@ -976,7 +705,7 @@ void Logger::CodeDeleteEvent(Address from) {
void Logger::ResourceEvent(const char* name, const char* tag) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::is_enabled() || !FLAG_log) return;
+ if (!Log::IsEnabled() || !FLAG_log) return;
LogMessageBuilder msg;
msg.Append("%s,%s,", name, tag);
@@ -994,7 +723,7 @@ void Logger::ResourceEvent(const char* name, const char* tag) {
void Logger::SuspectReadEvent(String* name, Object* obj) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::is_enabled() || !FLAG_log_suspect) return;
+ if (!Log::IsEnabled() || !FLAG_log_suspect) return;
LogMessageBuilder msg;
String* class_name = obj->IsJSObject()
? JSObject::cast(obj)->class_name()
@@ -1013,7 +742,7 @@ void Logger::SuspectReadEvent(String* name, Object* obj) {
void Logger::HeapSampleBeginEvent(const char* space, const char* kind) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::is_enabled() || !FLAG_log_gc) return;
+ if (!Log::IsEnabled() || !FLAG_log_gc) return;
LogMessageBuilder msg;
msg.Append("heap-sample-begin,\"%s\",\"%s\"\n", space, kind);
msg.WriteToLogFile();
@@ -1023,7 +752,7 @@ void Logger::HeapSampleBeginEvent(const char* space, const char* kind) {
void Logger::HeapSampleEndEvent(const char* space, const char* kind) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::is_enabled() || !FLAG_log_gc) return;
+ if (!Log::IsEnabled() || !FLAG_log_gc) return;
LogMessageBuilder msg;
msg.Append("heap-sample-end,\"%s\",\"%s\"\n", space, kind);
msg.WriteToLogFile();
@@ -1033,7 +762,7 @@ void Logger::HeapSampleEndEvent(const char* space, const char* kind) {
void Logger::HeapSampleItemEvent(const char* type, int number, int bytes) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::is_enabled() || !FLAG_log_gc) return;
+ if (!Log::IsEnabled() || !FLAG_log_gc) return;
LogMessageBuilder msg;
msg.Append("heap-sample-item,%s,%d,%d\n", type, number, bytes);
msg.WriteToLogFile();
@@ -1043,7 +772,7 @@ void Logger::HeapSampleItemEvent(const char* type, int number, int bytes) {
void Logger::DebugTag(const char* call_site_tag) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::is_enabled() || !FLAG_log) return;
+ if (!Log::IsEnabled() || !FLAG_log) return;
LogMessageBuilder msg;
msg.Append("debug-tag,%s\n", call_site_tag);
msg.WriteToLogFile();
@@ -1053,7 +782,7 @@ void Logger::DebugTag(const char* call_site_tag) {
void Logger::DebugEvent(const char* event_type, Vector<uint16_t> parameter) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::is_enabled() || !FLAG_log) return;
+ if (!Log::IsEnabled() || !FLAG_log) return;
StringBuilder s(parameter.length() + 1);
for (int i = 0; i < parameter.length(); ++i) {
s.AddCharacter(static_cast<char>(parameter[i]));
@@ -1072,15 +801,15 @@ void Logger::DebugEvent(const char* event_type, Vector<uint16_t> parameter) {
#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::TickEvent(TickSample* sample, bool overflow) {
- if (!Log::is_enabled() || !FLAG_prof) return;
+ if (!Log::IsEnabled() || !FLAG_prof) return;
LogMessageBuilder msg;
- msg.Append("tick,0x%"V8PRIp",0x%"V8PRIp",%d", sample->pc, sample->sp,
- static_cast<int>(sample->state));
+ msg.Append("tick,0x%" V8PRIxPTR ",0x%" V8PRIxPTR ",%d",
+ sample->pc, sample->sp, static_cast<int>(sample->state));
if (overflow) {
msg.Append(",overflow");
}
for (int i = 0; i < sample->frames_count; ++i) {
- msg.Append(",0x%"V8PRIp, sample->stack[i]);
+ msg.Append(",0x%" V8PRIxPTR, sample->stack[i]);
}
msg.Append('\n');
msg.WriteToLogFile();
@@ -1093,19 +822,113 @@ bool Logger::IsProfilerPaused() {
void Logger::PauseProfiler() {
+ if (profiler_->paused()) {
+ return;
+ }
profiler_->pause();
+ if (FLAG_prof_lazy) {
+ if (!FLAG_sliding_state_window) ticker_->Stop();
+ FLAG_log_code = false;
+ // Must be the same message as Log::kDynamicBufferSeal.
+ LOG(UncheckedStringEvent("profiler", "pause"));
+ }
}
void Logger::ResumeProfiler() {
+ if (!profiler_->paused() || !Log::IsEnabled()) {
+ return;
+ }
+ if (FLAG_prof_lazy) {
+ LOG(UncheckedStringEvent("profiler", "resume"));
+ FLAG_log_code = true;
+ LogCompiledFunctions();
+ if (!FLAG_sliding_state_window) ticker_->Start();
+ }
profiler_->resume();
}
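+
+
+// Lazy-profiling flow, as a sketch (the shell invocation is illustrative):
+//
+//   shell --prof --prof_lazy script.js  // sampler starts paused; code
+//                                       // creation events are not logged
+//   Logger::ResumeProfiler();           // logs "profiler,\"resume\"",
+//                                       // re-logs compiled functions,
+//                                       // starts the ticker
+//   Logger::PauseProfiler();            // stops the ticker and logs
+//                                       // "profiler,\"pause\""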
+// This function can be called while Log's mutex is acquired,
+// either from the main thread or from the Profiler's thread.
+void Logger::StopLoggingAndProfiling() {
+ Log::stop();
+ PauseProfiler();
+}
+
+
+bool Logger::IsProfilerSamplerActive() {
+ return ticker_->IsActive();
+}
+
+
int Logger::GetLogLines(int from_pos, char* dest_buf, int max_size) {
return Log::GetLogLines(from_pos, dest_buf, max_size);
}
+
+void Logger::LogCompiledFunctions() {
+ HandleScope scope;
+ Handle<SharedFunctionInfo>* sfis = NULL;
+ int compiled_funcs_count = 0;
+
+ {
+ AssertNoAllocation no_alloc;
+
+ HeapIterator iterator;
+ while (iterator.has_next()) {
+ HeapObject* obj = iterator.next();
+ ASSERT(obj != NULL);
+ if (obj->IsSharedFunctionInfo()
+ && SharedFunctionInfo::cast(obj)->is_compiled()) {
+ ++compiled_funcs_count;
+ }
+ }
+
+ sfis = NewArray< Handle<SharedFunctionInfo> >(compiled_funcs_count);
+ iterator.reset();
+
+ int i = 0;
+ while (iterator.has_next()) {
+ HeapObject* obj = iterator.next();
+ ASSERT(obj != NULL);
+ if (obj->IsSharedFunctionInfo()
+ && SharedFunctionInfo::cast(obj)->is_compiled()) {
+ sfis[i++] = Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(obj));
+ }
+ }
+ }
+
+ // During iteration, there can be heap allocation due to the
+ // GetScriptLineNumber call.
+ for (int i = 0; i < compiled_funcs_count; ++i) {
+ Handle<SharedFunctionInfo> shared = sfis[i];
+ Handle<String> name(String::cast(shared->name()));
+ Handle<String> func_name(name->length() > 0 ?
+ *name : shared->inferred_name());
+ if (shared->script()->IsScript()) {
+ Handle<Script> script(Script::cast(shared->script()));
+ if (script->name()->IsString()) {
+ Handle<String> script_name(String::cast(script->name()));
+ int line_num = GetScriptLineNumber(script, shared->start_position());
+ if (line_num > 0) {
+ line_num += script->line_offset()->value() + 1;
+ LOG(CodeCreateEvent("LazyCompile", shared->code(), *func_name,
+ *script_name, line_num));
+ } else {
+ // Can't distinguish eval and script here, so always use Script.
+ LOG(CodeCreateEvent("Script", shared->code(), *script_name));
+ }
+ continue;
+ }
+ }
+ // If no script or script has no name.
+ LOG(CodeCreateEvent("LazyCompile", shared->code(), *func_name));
+ }
+
+ DeleteArray(sfis);
+}
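+
+
+// Example of the lines this emits (tags match the CodeCreateEvent overloads
+// above; the names, sizes, and addresses are made up):
+//
+//   code-creation,LazyCompile,0x2ad8f5a0,184,"processItem app.js:18"
+//   code-creation,Script,0x2ad90120,96,"app.js"
+//   code-creation,LazyCompile,0x2ad90a40,52,"anonymousFn"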
+
#endif
@@ -1125,9 +948,15 @@ bool Logger::Setup() {
// --prof implies --log-code.
if (FLAG_prof) FLAG_log_code = true;
+ // --prof_lazy controls --log-code, implies --noprof_auto.
+ if (FLAG_prof_lazy) {
+ FLAG_log_code = false;
+ FLAG_prof_auto = false;
+ }
+
bool open_log_file = FLAG_log || FLAG_log_runtime || FLAG_log_api
|| FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
- || FLAG_log_regexp || FLAG_log_state_changes;
+ || FLAG_log_regexp || FLAG_log_state_changes || FLAG_prof_lazy;
// If we're logging anything, we need to open the log file.
if (open_log_file) {
@@ -1178,10 +1007,7 @@ bool Logger::Setup() {
current_state_ = &bottom_state_;
- // as log is initialized early with V8, we can assume that JS execution
- // frames can never reach this point on stack
- int stack_var;
- ticker_ = new Ticker(1, reinterpret_cast<uintptr_t>(&stack_var));
+ ticker_ = new Ticker(kSamplingIntervalMs);
if (FLAG_sliding_state_window && sliding_state_window_ == NULL) {
sliding_state_window_ = new SlidingStateWindow();
@@ -1194,6 +1020,8 @@ bool Logger::Setup() {
profiler_->Engage();
}
+ LogMessageBuilder::set_write_failure_handler(StopLoggingAndProfiling);
+
return true;
#else
@@ -1204,6 +1032,8 @@ bool Logger::Setup() {
void Logger::TearDown() {
#ifdef ENABLE_LOGGING_AND_PROFILING
+ LogMessageBuilder::set_write_failure_handler(NULL);
+
// Stop the profiler before closing the file.
if (profiler_ != NULL) {
profiler_->Disengage();
@@ -1212,8 +1042,10 @@ void Logger::TearDown() {
}
delete sliding_state_window_;
+ sliding_state_window_ = NULL;
delete ticker_;
+ ticker_ = NULL;
Log::Close();
#endif
@@ -1264,7 +1096,7 @@ static const char* StateToString(StateTag state) {
VMState::VMState(StateTag state) {
#if !defined(ENABLE_HEAP_PROTECTION)
// When not protecting the heap, there is no difference between
- // EXTERNAL and OTHER. As an optimizatin in that case, we will not
+ // EXTERNAL and OTHER. As an optimization in that case, we will not
// perform EXTERNAL->OTHER transitions through the API. We thus
// compress the two states into one.
if (state == EXTERNAL) state = OTHER;
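The Setup() and TearDown() hunks above bracket the new write-failure handler: StopLoggingAndProfiling is installed once logging is live and cleared before the log closes, so the callback can never fire against a dead log. A rough sketch of that plumbing, with hypothetical stand-in names (the real LogMessageBuilder differs):

#include <stddef.h>

typedef void (*WriteFailureHandler)();

class LogMessageBuilder {
 public:
  static void set_write_failure_handler(WriteFailureHandler handler) {
    write_failure_handler_ = handler;
  }
  // Hypothetical hook: invoked when a log write hits insufficient resources.
  static void WriteFailed() {
    if (write_failure_handler_ != NULL) write_failure_handler_();
  }
 private:
  static WriteFailureHandler write_failure_handler_;
};

WriteFailureHandler LogMessageBuilder::write_failure_handler_ = NULL;

int main() {
  LogMessageBuilder::set_write_failure_handler(NULL);  // as in TearDown()
  LogMessageBuilder::WriteFailed();                    // no-op once cleared
  return 0;
}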
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index 5f3c188c5..2f8f81c95 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -28,7 +28,8 @@
#ifndef V8_LOG_H_
#define V8_LOG_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Logger is used for collecting logging information from V8 during
// execution. The result is dumped to a file.
@@ -75,7 +76,7 @@ class LogMessageBuilder;
#ifdef ENABLE_LOGGING_AND_PROFILING
#define LOG(Call) \
do { \
- if (v8::internal::Logger::is_enabled()) \
+ if (v8::internal::Logger::IsEnabled()) \
v8::internal::Logger::Call; \
} while (false)
#else
@@ -201,7 +202,7 @@ class Logger {
return current_state_ ? current_state_->state() : OTHER;
}
- static bool is_enabled();
+ static bool IsEnabled();
// Pause/Resume collection of profiling data.
// When data collection is paused, Tick events are discarded until
@@ -214,8 +215,17 @@ class Logger {
// retrieve previously written messages. See v8.h.
static int GetLogLines(int from_pos, char* dest_buf, int max_size);
+ // Logs all compiled functions found in the heap.
+ static void LogCompiledFunctions();
+
private:
+ // Profiler's sampling interval (in milliseconds).
+ static const int kSamplingIntervalMs = 1;
+
+ // Emits the profiler's first message.
+ static void ProfilerBeginEvent();
+
// Emits the source code of a regexp. Used by regexp events.
static void LogRegExpSource(Handle<JSRegExp> regexp);
@@ -227,6 +237,12 @@ class Logger {
// Logs a StringEvent regardless of whether FLAG_log is true.
static void UncheckedStringEvent(const char* name, const char* value);
+ // Stops logging and profiling in case of insufficient resources.
+ static void StopLoggingAndProfiling();
+
+ // Returns whether profiler's sampler is active.
+ static bool IsProfilerSamplerActive();
+
// The sampler used by the profiler and the sliding state window.
static Ticker* ticker_;
@@ -252,6 +268,8 @@ class Logger {
friend class Profiler;
friend class SlidingStateWindow;
friend class VMState;
+
+ friend class LoggerTestHelper;
#else
static bool is_enabled() { return false; }
#endif
@@ -259,14 +277,9 @@ class Logger {
// Class that extracts stack trace, used for profiling.
-class StackTracer BASE_EMBEDDED {
+class StackTracer : public AllStatic {
public:
- explicit StackTracer(uintptr_t low_stack_bound)
- : low_stack_bound_(low_stack_bound) { }
- void Trace(TickSample* sample);
- private:
-
- uintptr_t low_stack_bound_;
+ static void Trace(TickSample* sample);
};
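StackTracer drops its per-instance low_stack_bound_ and becomes a pure bundle of static functions. A sketch of the AllStatic idiom it now inherits from, under an assumed implementation (the real V8 base class may differ):

// Assumed shape of AllStatic: a base whose constructor is declared but
// never defined, so derived classes act as namespaces of static functions.
class AllStatic {
 protected:
  AllStatic();  // never defined; instances are not meant to exist
};

struct TickSample;  // opaque here

class StackTracer : public AllStatic {
 public:
  static void Trace(TickSample* sample);  // no instance state required
};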
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index 48774ec5b..56e4ea6ca 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -33,7 +33,8 @@
#include "mark-compact.h"
#include "stub-cache.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// -------------------------------------------------------------------------
// MarkCompactCollector
@@ -471,7 +472,7 @@ void MarkCompactCollector::MarkMapContents(Map* map) {
void MarkCompactCollector::MarkDescriptorArray(
- DescriptorArray *descriptors) {
+ DescriptorArray* descriptors) {
if (descriptors->IsMarked()) return;
// Empty descriptor array is marked as a root before any maps are marked.
ASSERT(descriptors != Heap::empty_descriptor_array());
@@ -871,7 +872,7 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
// clearing map transitions when necessary.
current = map;
bool on_dead_path = !current->IsMarked();
- Object *next;
+ Object* next;
while (SafeIsMap(current)) {
next = current->prototype();
// There should never be a dead map above a live map.
diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h
index bfa2c3ce5..d7ad63013 100644
--- a/deps/v8/src/mark-compact.h
+++ b/deps/v8/src/mark-compact.h
@@ -28,7 +28,8 @@
#ifndef V8_MARK_COMPACT_H_
#define V8_MARK_COMPACT_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Callback function, returns whether an object is alive. The heap size
// of the object is returned in size. It optionally updates the offset
diff --git a/deps/v8/src/memory.h b/deps/v8/src/memory.h
index 2397bc6c7..c64699ee3 100644
--- a/deps/v8/src/memory.h
+++ b/deps/v8/src/memory.h
@@ -28,13 +28,18 @@
#ifndef V8_MEMORY_H_
#define V8_MEMORY_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Memory provides an interface to 'raw' memory. It encapsulates the casts
// that typically are needed when incompatible pointer types are used.
class Memory {
public:
+ static uint16_t& uint16_at(Address addr) {
+ return *reinterpret_cast<uint16_t*>(addr);
+ }
+
static uint32_t& uint32_at(Address addr) {
return *reinterpret_cast<uint32_t*>(addr);
}
@@ -43,6 +48,10 @@ class Memory {
return *reinterpret_cast<int32_t*>(addr);
}
+ static uint64_t& uint64_at(Address addr) {
+ return *reinterpret_cast<uint64_t*>(addr);
+ }
+
static int& int_at(Address addr) {
return *reinterpret_cast<int*>(addr);
}
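The new uint16_at and uint64_at accessors round out Memory's width coverage. A self-contained sketch of how such accessors read and write raw bytes; Address is assumed to be a byte pointer as in V8, and the casts carry the usual alignment and strict-aliasing caveats:

#include <stdint.h>

typedef uint8_t* Address;  // stand-in for V8's Address

static uint16_t& uint16_at(Address addr) {
  return *reinterpret_cast<uint16_t*>(addr);
}

static uint64_t& uint64_at(Address addr) {
  return *reinterpret_cast<uint64_t*>(addr);
}

int main() {
  uint64_t storage = 0;  // naturally aligned backing store
  Address p = reinterpret_cast<Address>(&storage);
  uint16_at(p) = 0xBEEF;             // write 16 bits in place
  return uint64_at(p) == 0 ? 1 : 0;  // reread the same bytes as 64 bits
}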
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index ca0ce2a54..a3fffcb50 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -33,7 +33,8 @@
#include "spaces-inl.h"
#include "top.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// If no message listeners have been registered this one is called
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index 1ff10aae8..80ce8eb9c 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -36,7 +36,8 @@
#include "handles-inl.h"
// Forward declaration of MessageLocation.
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
class MessageLocation;
} } // namespace v8::internal
@@ -57,7 +58,8 @@ class V8Message {
};
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
struct Language;
class SourceInfo;
diff --git a/deps/v8/src/mirror-delay.js b/deps/v8/src/mirror-delay.js
index 30f19f0a0..f5a12c793 100644
--- a/deps/v8/src/mirror-delay.js
+++ b/deps/v8/src/mirror-delay.js
@@ -29,8 +29,7 @@
// Touch the RegExp and Date functions to make sure that date-delay.js and
// regexp-delay.js have been loaded. This is required as the mirrors use
-// functions within these files through the builtins object. See the
-// function DateToISO8601_ as an example.
+// functions within these files through the builtins object.
RegExp;
Date;
@@ -935,7 +934,8 @@ inherits(DateMirror, ObjectMirror);
DateMirror.prototype.toText = function() {
- return DateToISO8601_(this.value_);
+ var s = JSON.stringify(this.value_);
+ return s.substring(1, s.length - 1); // cut quotes
}
@@ -1617,6 +1617,11 @@ ScriptMirror.prototype.scriptType = function() {
};
+ScriptMirror.prototype.compilationType = function() {
+ return this.script_.compilation_type;
+};
+
+
ScriptMirror.prototype.lineCount = function() {
return this.script_.lineCount();
};
@@ -1638,6 +1643,20 @@ ScriptMirror.prototype.context = function() {
};
+ScriptMirror.prototype.evalFromFunction = function() {
+ return MakeMirror(this.script_.eval_from_function);
+};
+
+
+ScriptMirror.prototype.evalFromLocation = function() {
+ var eval_from_function = this.evalFromFunction();
+ if (!eval_from_function.isUndefined()) {
+ var position = this.script_.eval_from_position;
+ return eval_from_function.script().locationFromPosition(position, true);
+ }
+};
+
+
ScriptMirror.prototype.toText = function() {
var result = '';
result += this.name();
@@ -1728,12 +1747,13 @@ JSONProtocolSerializer.prototype.serializeValue = function(mirror) {
/**
* Returns a serialization of all the objects referenced.
*
- * @param {Mirror} mirror The mirror to serialize
- * @returns {String} JSON serialization
+ * @param {Mirror} mirror The mirror to serialize.
+ * @returns {Array.<Object>} Array of the referenced objects converted to
+ * protocol objects.
*/
JSONProtocolSerializer.prototype.serializeReferencedObjects = function() {
- // Collect the JSON serialization of the referenced objects in an array.
- var content = new Array();
+ // Collect the protocol representation of the referenced objects in an array.
+ var content = [];
// Get the number of referenced objects.
var count = this.mirrors_.length;
@@ -1742,8 +1762,7 @@ JSONProtocolSerializer.prototype.serializeReferencedObjects = function() {
content.push(this.serialize_(this.mirrors_[i], false, false));
}
- var json = ArrayToJSONArray_(content);
- return json;
+ return content;
}
@@ -1752,6 +1771,11 @@ JSONProtocolSerializer.prototype.includeSource_ = function() {
}
+JSONProtocolSerializer.prototype.compactFormat_ = function() {
+ return this.options_ && this.options_.compactFormat;
+}
+
+
JSONProtocolSerializer.prototype.add_ = function(mirror) {
// If this mirror is already in the list just return.
for (var i = 0; i < this.mirrors_.length; i++) {
@@ -1765,26 +1789,70 @@ JSONProtocolSerializer.prototype.add_ = function(mirror) {
}
+/**
+ * Formats mirror object to protocol reference object with some data that can
+ * be used to display the value in debugger.
+ * @param {Mirror} mirror Mirror to serialize.
+ * @return {Object} Protocol reference object.
+ */
+JSONProtocolSerializer.prototype.serializeReferenceWithDisplayData_ =
+ function(mirror) {
+ var o = {};
+ o.ref = mirror.handle();
+ o.type = mirror.type();
+ switch (mirror.type()) {
+ case UNDEFINED_TYPE:
+ case NULL_TYPE:
+ case BOOLEAN_TYPE:
+ case NUMBER_TYPE:
+ o.value = mirror.value();
+ break;
+ case STRING_TYPE:
+ // Limit string length.
+ o.value = mirror.toText();
+ break;
+ case FUNCTION_TYPE:
+ o.name = mirror.name();
+ o.inferredName = mirror.inferredName();
+ if (mirror.script()) {
+ o.scriptId = mirror.script().id();
+ }
+ break;
+ case ERROR_TYPE:
+ case REGEXP_TYPE:
+ o.value = mirror.toText();
+ break;
+ case OBJECT_TYPE:
+ o.className = mirror.className();
+ break;
+ }
+ return o;
+};
+
JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
details) {
// If serializing a reference to a mirror just return the reference and add
// the mirror to the referenced mirrors.
if (reference &&
(mirror.isValue() || mirror.isScript() || mirror.isContext())) {
- this.add_(mirror);
- return '{"ref":' + mirror.handle() + '}';
+ if (this.compactFormat_() && mirror.isValue()) {
+ return this.serializeReferenceWithDisplayData_(mirror);
+ } else {
+ this.add_(mirror);
+ return {'ref' : mirror.handle()};
+ }
}
- // Collect the JSON property/value pairs in an array.
- var content = new Array();
+ // Collect the JSON property/value pairs.
+ var content = {};
// Add the mirror handle.
if (mirror.isValue() || mirror.isScript() || mirror.isContext()) {
- content.push(MakeJSONPair_('handle', NumberToJSON_(mirror.handle())));
+ content.handle = mirror.handle();
}
// Always add the type.
- content.push(MakeJSONPair_('type', StringToJSON_(mirror.type())));
+ content.type = mirror.type();
switch (mirror.type()) {
case UNDEFINED_TYPE:
@@ -1794,26 +1862,25 @@ JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
case BOOLEAN_TYPE:
// Boolean values are simply represented by their value.
- content.push(MakeJSONPair_('value', BooleanToJSON_(mirror.value())));
+ content.value = mirror.value();
break;
case NUMBER_TYPE:
// Number values are simply represented by their value.
- content.push(MakeJSONPair_('value', NumberToJSON_(mirror.value())));
+ content.value = NumberToJSON_(mirror.value());
break;
case STRING_TYPE:
// String values might have their value cropped to keep down size.
if (mirror.length() > kMaxProtocolStringLength) {
var str = mirror.value().substring(0, kMaxProtocolStringLength);
- content.push(MakeJSONPair_('value', StringToJSON_(str)));
- content.push(MakeJSONPair_('fromIndex', NumberToJSON_(0)));
- content.push(MakeJSONPair_('toIndex',
- NumberToJSON_(kMaxProtocolStringLength)));
+ content.value = str;
+ content.fromIndex = 0;
+ content.toIndex = kMaxProtocolStringLength;
} else {
- content.push(MakeJSONPair_('value', StringToJSON_(mirror.value())));
+ content.value = mirror.value();
}
- content.push(MakeJSONPair_('length', NumberToJSON_(mirror.length())));
+ content.length = mirror.length();
break;
case OBJECT_TYPE:
@@ -1836,46 +1903,46 @@ JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
case SCRIPT_TYPE:
// Script is represented by id, name and source attributes.
if (mirror.name()) {
- content.push(MakeJSONPair_('name', StringToJSON_(mirror.name())));
+ content.name = mirror.name();
}
- content.push(MakeJSONPair_('id', NumberToJSON_(mirror.id())));
- content.push(MakeJSONPair_('lineOffset',
- NumberToJSON_(mirror.lineOffset())));
- content.push(MakeJSONPair_('columnOffset',
- NumberToJSON_(mirror.columnOffset())));
- content.push(MakeJSONPair_('lineCount',
- NumberToJSON_(mirror.lineCount())));
+ content.id = mirror.id();
+ content.lineOffset = mirror.lineOffset();
+ content.columnOffset = mirror.columnOffset();
+ content.lineCount = mirror.lineCount();
if (mirror.data()) {
- content.push(MakeJSONPair_('data', JSON.stringify(mirror.data())));
+ content.data = mirror.data();
}
if (this.includeSource_()) {
- content.push(MakeJSONPair_('source',
- StringToJSON_(mirror.source())));
+ content.source = mirror.source();
} else {
var sourceStart = mirror.source().substring(0, 80);
- content.push(MakeJSONPair_('sourceStart',
- StringToJSON_(sourceStart)));
+ content.sourceStart = sourceStart;
+ }
+ content.sourceLength = mirror.source().length;
+ content.scriptType = mirror.scriptType();
+ content.compilationType = mirror.compilationType();
+ if (mirror.compilationType() == 1) { // Compilation type eval.
+ content.evalFromScript =
+ this.serializeReference(mirror.evalFromFunction().script());
+ var evalFromLocation = mirror.evalFromLocation();
+ content.evalFromLocation = { line: evalFromLocation.line,
+ column: evalFromLocation.column };
}
- content.push(MakeJSONPair_('sourceLength',
- NumberToJSON_(mirror.source().length)));
- content.push(MakeJSONPair_('scriptType',
- NumberToJSON_(mirror.scriptType())));
if (mirror.context()) {
- content.push(MakeJSONPair_('context',
- this.serializeReference(mirror.context())));
+ content.context = this.serializeReference(mirror.context());
}
break;
case CONTEXT_TYPE:
- content.push(MakeJSONPair_('data', JSON.stringify(mirror.data())));
+ content.data = mirror.data();
break;
}
// Always add the text representation.
- content.push(MakeJSONPair_('text', StringToJSON_(mirror.toText())));
+ content.text = mirror.toText();
// Create and return the JSON string.
- return ArrayToJSONObject_(content);
+ return content;
}
@@ -1893,44 +1960,40 @@ JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
JSONProtocolSerializer.prototype.serializeObject_ = function(mirror, content,
details) {
// Add general object properties.
- content.push(MakeJSONPair_('className',
- StringToJSON_(mirror.className())));
- content.push(MakeJSONPair_('constructorFunction',
- this.serializeReference(mirror.constructorFunction())));
- content.push(MakeJSONPair_('protoObject',
- this.serializeReference(mirror.protoObject())));
- content.push(MakeJSONPair_('prototypeObject',
- this.serializeReference(mirror.prototypeObject())));
+ content.className = mirror.className();
+ content.constructorFunction =
+ this.serializeReference(mirror.constructorFunction());
+ content.protoObject = this.serializeReference(mirror.protoObject());
+ content.prototypeObject = this.serializeReference(mirror.prototypeObject());
// Add flags to indicate whether there are interceptors.
if (mirror.hasNamedInterceptor()) {
- content.push(MakeJSONPair_('namedInterceptor', BooleanToJSON_(true)));
+ content.namedInterceptor = true;
}
if (mirror.hasIndexedInterceptor()) {
- content.push(MakeJSONPair_('indexedInterceptor', BooleanToJSON_(true)));
+ content.indexedInterceptor = true;
}
// Add function specific properties.
if (mirror.isFunction()) {
// Add function specific properties.
- content.push(MakeJSONPair_('name', StringToJSON_(mirror.name())));
+ content.name = mirror.name();
if (!IS_UNDEFINED(mirror.inferredName())) {
- content.push(MakeJSONPair_('inferredName',
- StringToJSON_(mirror.inferredName())));
+ content.inferredName = mirror.inferredName();
}
- content.push(MakeJSONPair_('resolved', BooleanToJSON_(mirror.resolved())));
+ content.resolved = mirror.resolved();
if (mirror.resolved()) {
- content.push(MakeJSONPair_('source', StringToJSON_(mirror.source())));
+ content.source = mirror.source();
}
if (mirror.script()) {
- content.push(MakeJSONPair_('script', this.serializeReference(mirror.script())));
+ content.script = this.serializeReference(mirror.script());
}
}
// Add date specific properties.
if (mirror.isDate()) {
// Add date specific properties.
- content.push(MakeJSONPair_('value', DateToJSON_(mirror.value())));
+ content.value = mirror.value();
}
// Add actual properties - named properties followed by indexed properties.
@@ -1938,20 +2001,20 @@ JSONProtocolSerializer.prototype.serializeObject_ = function(mirror, content,
var propertyIndexes = mirror.propertyNames(PropertyKind.Indexed);
var p = new Array(propertyNames.length + propertyIndexes.length);
for (var i = 0; i < propertyNames.length; i++) {
- var property_mirror = mirror.property(propertyNames[i]);
- p[i] = this.serializeProperty_(property_mirror);
+ var propertyMirror = mirror.property(propertyNames[i]);
+ p[i] = this.serializeProperty_(propertyMirror);
if (details) {
- this.add_(property_mirror.value());
+ this.add_(propertyMirror.value());
}
}
for (var i = 0; i < propertyIndexes.length; i++) {
- var property_mirror = mirror.property(propertyIndexes[i]);
- p[propertyNames.length + i] = this.serializeProperty_(property_mirror);
+ var propertyMirror = mirror.property(propertyIndexes[i]);
+ p[propertyNames.length + i] = this.serializeProperty_(propertyMirror);
if (details) {
- this.add_(property_mirror.value());
+ this.add_(propertyMirror.value());
}
}
- content.push(MakeJSONPair_('properties', ArrayToJSONArray_(p)));
+ content.properties = p;
}
@@ -1971,207 +2034,93 @@ JSONProtocolSerializer.prototype.serializeObject_ = function(mirror, content,
* {"name":"hello","ref":1}
* {"name":"length","attributes":7,"propertyType":3,"ref":2}
*
- * @param {PropertyMirror} property_mirror The property to serialize
- * @returns {String} JSON serialization
+ * @param {PropertyMirror} propertyMirror The property to serialize.
+ * @returns {Object} Protocol object representing the property.
*/
-JSONProtocolSerializer.prototype.serializeProperty_ = function(property_mirror) {
- var builder = new builtins.StringBuilder();
- builder.add('{"name":');
- builder.add(StringToJSON_(property_mirror.name()));
- if (property_mirror.attributes() != PropertyAttribute.None) {
- builder.add(',"attributes":');
- builder.add(NumberToJSON_(property_mirror.attributes()));
- }
- if (property_mirror.propertyType() != PropertyType.Normal) {
- builder.add(',"propertyType":');
- builder.add(NumberToJSON_(property_mirror.propertyType()));
+JSONProtocolSerializer.prototype.serializeProperty_ = function(propertyMirror) {
+ var result = {};
+
+ result.name = propertyMirror.name();
+ var propertyValue = propertyMirror.value();
+ if (this.compactFormat_() && propertyValue.isValue()) {
+ result.value = this.serializeReferenceWithDisplayData_(propertyValue);
+ } else {
+ if (propertyMirror.attributes() != PropertyAttribute.None) {
+ result.attributes = propertyMirror.attributes();
+ }
+ if (propertyMirror.propertyType() != PropertyType.Normal) {
+ result.propertyType = propertyMirror.propertyType();
+ }
+ result.ref = propertyValue.handle();
}
- builder.add(',"ref":');
- builder.add(NumberToJSON_(property_mirror.value().handle()));
- builder.add('}');
- return builder.generate();
+ return result;
}
JSONProtocolSerializer.prototype.serializeFrame_ = function(mirror, content) {
- content.push(MakeJSONPair_('index', NumberToJSON_(mirror.index())));
- content.push(MakeJSONPair_('receiver',
- this.serializeReference(mirror.receiver())));
+ content.index = mirror.index();
+ content.receiver = this.serializeReference(mirror.receiver());
var func = mirror.func();
- content.push(MakeJSONPair_('func', this.serializeReference(func)));
+ content.func = this.serializeReference(func);
if (func.script()) {
- content.push(MakeJSONPair_('script',
- this.serializeReference(func.script())));
+ content.script = this.serializeReference(func.script());
}
- content.push(MakeJSONPair_('constructCall',
- BooleanToJSON_(mirror.isConstructCall())));
- content.push(MakeJSONPair_('debuggerFrame',
- BooleanToJSON_(mirror.isDebuggerFrame())));
+ content.constructCall = mirror.isConstructCall();
+ content.debuggerFrame = mirror.isDebuggerFrame();
var x = new Array(mirror.argumentCount());
for (var i = 0; i < mirror.argumentCount(); i++) {
- arg = new Array();
+ var arg = {};
var argument_name = mirror.argumentName(i)
if (argument_name) {
- arg.push(MakeJSONPair_('name', StringToJSON_(argument_name)));
+ arg.name = argument_name;
}
- arg.push(MakeJSONPair_('value',
- this.serializeReference(mirror.argumentValue(i))));
- x[i] = ArrayToJSONObject_(arg);
+ arg.value = this.serializeReference(mirror.argumentValue(i));
+ x[i] = arg;
}
- content.push(MakeJSONPair_('arguments', ArrayToJSONArray_(x)));
+ content.arguments = x;
var x = new Array(mirror.localCount());
for (var i = 0; i < mirror.localCount(); i++) {
- var name = MakeJSONPair_('name', StringToJSON_(mirror.localName(i)));
- var value = MakeJSONPair_('value',
- this.serializeReference(mirror.localValue(i)));
- x[i] = '{' + name + ',' + value + '}';
+ var local = {};
+ local.name = mirror.localName(i);
+ local.value = this.serializeReference(mirror.localValue(i));
+ x[i] = local;
}
- content.push(MakeJSONPair_('locals', ArrayToJSONArray_(x)));
- content.push(MakeJSONPair_('position',
- NumberToJSON_(mirror.sourcePosition())));
+ content.locals = x;
+ content.position = mirror.sourcePosition();
var line = mirror.sourceLine();
if (!IS_UNDEFINED(line)) {
- content.push(MakeJSONPair_('line', NumberToJSON_(line)));
+ content.line = line;
}
var column = mirror.sourceColumn();
if (!IS_UNDEFINED(column)) {
- content.push(MakeJSONPair_('column', NumberToJSON_(column)));
+ content.column = column;
}
var source_line_text = mirror.sourceLineText();
if (!IS_UNDEFINED(source_line_text)) {
- content.push(MakeJSONPair_('sourceLineText',
- StringToJSON_(source_line_text)));
+ content.sourceLineText = source_line_text;
}
}
-function MakeJSONPair_(name, value) {
- return '"' + name + '":' + value;
-}
-
-
-function ArrayToJSONObject_(content) {
- return '{' + content.join(',') + '}';
-}
-
-
-function ArrayToJSONArray_(content) {
- return '[' + content.join(',') + ']';
-}
-
-
-function BooleanToJSON_(value) {
- return String(value);
-}
-
-
/**
- * Convert a number to a JSON string value. For all finite numbers the number
- * literal representation is used. For non finite numbers NaN, Infinite and
+ * Convert a number to a protocol value. For all finite numbers the number
+ * itself is returned. For non-finite numbers NaN, Infinity and
* -Infinity the string representation "NaN", "Infinity" or "-Infinity"
- * (including the quotes) is returned.
+ * (not including the quotes) is returned.
*
- * @param {number} value The number value to convert to a JSON value
- * @returns {String} JSON value
+ * @param {number} value The number value to convert to a protocol value.
+ * @returns {number|string} Protocol value.
*/
function NumberToJSON_(value) {
if (isNaN(value)) {
- return '"NaN"';
+ return 'NaN';
}
if (!isFinite(value)) {
if (value > 0) {
- return '"Infinity"';
+ return 'Infinity';
} else {
- return '"-Infinity"';
+ return '-Infinity';
}
}
- return String(value);
-}
-
-
-// Mapping of some control characters to avoid the \uXXXX syntax for most
-// commonly used control cahracters.
-const ctrlCharMap_ = {
- '\b': '\\b',
- '\t': '\\t',
- '\n': '\\n',
- '\f': '\\f',
- '\r': '\\r',
- '"' : '\\"',
- '\\': '\\\\'
-};
-
-
-// Regular expression testing for ", \ and control characters (0x00 - 0x1F).
-const ctrlCharTest_ = new RegExp('["\\\\\x00-\x1F]');
-
-
-// Regular expression matching ", \ and control characters (0x00 - 0x1F)
-// globally.
-const ctrlCharMatch_ = new RegExp('["\\\\\x00-\x1F]', 'g');
-
-
-/**
- * Convert a String to its JSON representation (see http://www.json.org/). To
- * avoid depending on the String object this method calls the functions in
- * string.js directly and not through the value.
- * @param {String} value The String value to format as JSON
- * @return {string} JSON formatted String value
- */
-function StringToJSON_(value) {
- // Check for" , \ and control characters (0x00 - 0x1F). No need to call
- // RegExpTest as ctrlchar is constructed using RegExp.
- if (ctrlCharTest_.test(value)) {
- // Replace ", \ and control characters (0x00 - 0x1F).
- return '"' +
- value.replace(ctrlCharMatch_, function (char) {
- // Use charmap if possible.
- var mapped = ctrlCharMap_[char];
- if (mapped) return mapped;
- mapped = char.charCodeAt();
- // Convert control character to unicode escape sequence.
- return '\\u00' +
- %NumberToRadixString(Math.floor(mapped / 16), 16) +
- %NumberToRadixString(mapped % 16, 16);
- })
- + '"';
- }
-
- // Simple string with no special characters.
- return '"' + value + '"';
-}
-
-
-/**
- * Convert a Date to ISO 8601 format. To avoid depending on the Date object
- * this method calls the functions in date.js directly and not through the
- * value.
- * @param {Date} value The Date value to format as JSON
- * @return {string} JSON formatted Date value
- */
-function DateToISO8601_(value) {
- function f(n) {
- return n < 10 ? '0' + n : n;
- }
- function g(n) {
- return n < 10 ? '00' + n : n < 100 ? '0' + n : n;
- }
- return builtins.GetUTCFullYearFrom(value) + '-' +
- f(builtins.GetUTCMonthFrom(value) + 1) + '-' +
- f(builtins.GetUTCDateFrom(value)) + 'T' +
- f(builtins.GetUTCHoursFrom(value)) + ':' +
- f(builtins.GetUTCMinutesFrom(value)) + ':' +
- f(builtins.GetUTCSecondsFrom(value)) + '.' +
- g(builtins.GetUTCMillisecondsFrom(value)) + 'Z';
-}
-
-/**
- * Convert a Date to ISO 8601 format. To avoid depending on the Date object
- * this method calls the functions in date.js directly and not through the
- * value.
- * @param {Date} value The Date value to format as JSON
- * @return {string} JSON formatted Date value
- */
-function DateToJSON_(value) {
- return '"' + DateToISO8601_(value) + '"';
+ return value;
}
diff --git a/deps/v8/src/natives.h b/deps/v8/src/natives.h
index 3eb80909a..fdfd21358 100644
--- a/deps/v8/src/natives.h
+++ b/deps/v8/src/natives.h
@@ -28,7 +28,8 @@
#ifndef V8_NATIVES_H_
#define V8_NATIVES_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
typedef bool (*NativeSourceCallback)(Vector<const char> name,
Vector<const char> source,
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index e17201478..ba07af7e0 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -32,7 +32,8 @@
#include "macro-assembler.h"
#include "jsregexp.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
#ifdef DEBUG
@@ -941,6 +942,8 @@ void Script::ScriptPrint() {
column_offset()->ShortPrint();
PrintF("\n - type: ");
type()->ShortPrint();
+ PrintF("\n - id: ");
+ id()->ShortPrint();
PrintF("\n");
}
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 782117808..d34e46539 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -40,7 +40,8 @@
#include "conversions-inl.h"
#include "property.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
PropertyDetails::PropertyDetails(Smi* smi) {
value_ = smi->value();
@@ -764,9 +765,11 @@ Failure* Failure::RetryAfterGC(int requested_bytes) {
Failure* Failure::Construct(Type type, int value) {
int info = (value << kFailureTypeTagSize) | type;
+ // TODO(X64): Stop using Smi validation for non-smi checks, even if they
+ // happen to be identical at the moment.
ASSERT(Smi::IsValid(info)); // Same validation check as in Smi
return reinterpret_cast<Failure*>(
- static_cast<intptr_t>((info << kFailureTagSize) | kFailureTag));
+ (static_cast<intptr_t>(info) << kFailureTagSize) | kFailureTag);
}
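The repositioned cast in Failure::Construct (and the matching TODO) addresses a 64-bit hazard: shifting before widening performs the shift in 32-bit int, losing, or for signed values overflowing into, the high bits. A small self-contained illustration, using unsigned types so both variants are well-defined:

#include <assert.h>
#include <stdint.h>

int main() {
  uint32_t info = 0x40000000u;  // a high bit a 32-bit shift would lose

  // Shift in 32 bits, then widen: the bit has already wrapped away.
  uint64_t lost = static_cast<uint64_t>(info << 2);

  // Widen first, then shift: the bit survives in the upper word.
  uint64_t kept = static_cast<uint64_t>(info) << 2;

  assert(lost == 0);
  assert(kept == 0x100000000ULL);
  return 0;
}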
@@ -1794,7 +1797,7 @@ int HeapObject::SizeFromMap(Map* map) {
void Map::set_instance_size(int value) {
- ASSERT((value & ~(kPointerSize - 1)) == value);
+ ASSERT_EQ(0, value & (kPointerSize - 1));
value >>= kPointerSizeLog2;
ASSERT(0 <= value && value < 256);
WRITE_BYTE_FIELD(this, kInstanceSizeOffset, static_cast<byte>(value));
@@ -1895,6 +1898,11 @@ Code::Kind Code::kind() {
}
+InLoopFlag Code::ic_in_loop() {
+ return ExtractICInLoopFromFlags(flags());
+}
+
+
InlineCacheState Code::ic_state() {
InlineCacheState result = ExtractICStateFromFlags(flags());
// Only allow uninitialized or debugger states for non-IC code
@@ -1941,11 +1949,13 @@ bool Code::is_inline_cache_stub() {
Code::Flags Code::ComputeFlags(Kind kind,
+ InLoopFlag in_loop,
InlineCacheState ic_state,
PropertyType type,
int argc) {
// Compute the bit mask.
int bits = kind << kFlagsKindShift;
+ if (in_loop) bits |= kFlagsICInLoopMask;
bits |= ic_state << kFlagsICStateShift;
bits |= type << kFlagsTypeShift;
bits |= argc << kFlagsArgumentsCountShift;
@@ -1953,6 +1963,7 @@ Code::Flags Code::ComputeFlags(Kind kind,
Flags result = static_cast<Flags>(bits);
ASSERT(ExtractKindFromFlags(result) == kind);
ASSERT(ExtractICStateFromFlags(result) == ic_state);
+ ASSERT(ExtractICInLoopFromFlags(result) == in_loop);
ASSERT(ExtractTypeFromFlags(result) == type);
ASSERT(ExtractArgumentsCountFromFlags(result) == argc);
return result;
@@ -1961,8 +1972,9 @@ Code::Flags Code::ComputeFlags(Kind kind,
Code::Flags Code::ComputeMonomorphicFlags(Kind kind,
PropertyType type,
+ InLoopFlag in_loop,
int argc) {
- return ComputeFlags(kind, MONOMORPHIC, type, argc);
+ return ComputeFlags(kind, in_loop, MONOMORPHIC, type, argc);
}
@@ -1978,6 +1990,12 @@ InlineCacheState Code::ExtractICStateFromFlags(Flags flags) {
}
+InLoopFlag Code::ExtractICInLoopFromFlags(Flags flags) {
+ int bits = (flags & kFlagsICInLoopMask);
+ return bits != 0 ? IN_LOOP : NOT_IN_LOOP;
+}
+
+
PropertyType Code::ExtractTypeFromFlags(Flags flags) {
int bits = (flags & kFlagsTypeMask) >> kFlagsTypeShift;
return static_cast<PropertyType>(bits);
@@ -2094,7 +2112,11 @@ ACCESSORS(Script, data, Object, kDataOffset)
ACCESSORS(Script, context_data, Object, kContextOffset)
ACCESSORS(Script, wrapper, Proxy, kWrapperOffset)
ACCESSORS(Script, type, Smi, kTypeOffset)
+ACCESSORS(Script, compilation_type, Smi, kCompilationTypeOffset)
ACCESSORS(Script, line_ends, Object, kLineEndsOffset)
+ACCESSORS(Script, eval_from_function, Object, kEvalFromFunctionOffset)
+ACCESSORS(Script, eval_from_instructions_offset, Smi,
+ kEvalFrominstructionsOffsetOffset)
#ifdef ENABLE_DEBUGGER_SUPPORT
ACCESSORS(DebugInfo, shared, SharedFunctionInfo, kSharedFunctionInfoIndex)
@@ -2536,6 +2558,24 @@ bool JSObject::HasElement(uint32_t index) {
}
+Smi* JSObject::InterceptorPropertyLookupHint(String* name) {
+ // TODO(antonm): Do we want to do any shortcuts for global object?
+ if (HasFastProperties()) {
+ LookupResult lookup;
+ LocalLookupRealNamedProperty(name, &lookup);
+ if (lookup.IsValid()) {
+ if (lookup.type() == FIELD && lookup.IsCacheable()) {
+ return Smi::FromInt(lookup.GetFieldIndex());
+ }
+ } else {
+ return Smi::FromInt(kLookupInPrototype);
+ }
+ }
+
+ return Smi::FromInt(kLookupInHolder);
+}
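InterceptorPropertyLookupHint packs three outcomes into one Smi: a non-negative fast-property field index, or one of the two negative sentinels declared in objects.h (kLookupInHolder == -1, kLookupInPrototype == -2). A simplified consumer-side decode, mirroring the switch in GetInterceptorPropertyWithLookupHint below, with plain ints standing in for Smi:

#include <assert.h>

const int kLookupInHolder = -1;
const int kLookupInPrototype = -2;

enum LookupAction { LOAD_FIELD, LOOKUP_IN_PROTOTYPE, LOOKUP_IN_HOLDER };

LookupAction DecodeHint(int hint, int* field_index) {
  if (hint >= 0) {
    *field_index = hint;  // cached fast-property index
    return LOAD_FIELD;
  }
  if (hint == kLookupInPrototype) return LOOKUP_IN_PROTOTYPE;
  assert(hint == kLookupInHolder);
  return LOOKUP_IN_HOLDER;
}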
+
+
bool AccessorInfo::all_can_read() {
return BooleanBit::get(flag(), kAllCanReadBit);
}
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 9a7f7aa67..0546578ab 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -41,7 +41,8 @@
#include "disassembler.h"
#endif
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Getters and setters are stored in a fixed array property. These are
// constants for their indices.
@@ -384,7 +385,9 @@ Object* JSObject::SetLazyProperty(LookupResult* result,
}
-Object* JSObject::DeleteLazyProperty(LookupResult* result, String* name) {
+Object* JSObject::DeleteLazyProperty(LookupResult* result,
+ String* name,
+ DeleteMode mode) {
HandleScope scope;
Handle<JSObject> this_handle(this);
Handle<String> name_handle(name);
@@ -392,7 +395,7 @@ Object* JSObject::DeleteLazyProperty(LookupResult* result, String* name) {
LoadLazy(Handle<JSObject>(JSObject::cast(result->GetLazyValue())),
&pending_exception);
if (pending_exception) return Failure::Exception();
- return this_handle->DeleteProperty(*name_handle);
+ return this_handle->DeleteProperty(*name_handle, mode);
}
@@ -536,6 +539,9 @@ void Failure::FailurePrint() {
Failure* Failure::RetryAfterGC(int requested_bytes, AllocationSpace space) {
ASSERT((space & ~kSpaceTagMask) == 0);
+ // TODO(X64): Stop using Smi validation for non-smi checks, even if they
+ // happen to be identical at the moment.
+
int requested = requested_bytes >> kObjectAlignmentBits;
int value = (requested << kSpaceTagSize) | space;
// We can't very well allocate a heap number in this situation, and if the
@@ -1736,8 +1742,10 @@ Object* JSObject::SetProperty(LookupResult* result,
}
return ConvertDescriptorToField(name, value, attributes);
case CONSTANT_FUNCTION:
- if (value == result->GetConstantFunction()) return value;
// Only replace the function if necessary.
+ if (value == result->GetConstantFunction()) return value;
+ // Preserve the attributes of this existing property.
+ attributes = result->GetAttributes();
return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
case CALLBACKS:
return SetPropertyWithCallback(result->GetCallbackObject(),
@@ -1817,8 +1825,10 @@ Object* JSObject::IgnoreAttributesAndSetLocalProperty(
}
return ConvertDescriptorToField(name, value, attributes);
case CONSTANT_FUNCTION:
- if (value == result->GetConstantFunction()) return value;
// Only replace the function if necessary.
+ if (value == result->GetConstantFunction()) return value;
+ // Preserve the attributes of this existing property.
+ attributes = result->GetAttributes();
return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
case CALLBACKS:
case INTERCEPTOR:
@@ -2112,7 +2122,7 @@ Object* JSObject::NormalizeElements() {
}
-Object* JSObject::DeletePropertyPostInterceptor(String* name) {
+Object* JSObject::DeletePropertyPostInterceptor(String* name, DeleteMode mode) {
// Check local property, ignore interceptor.
LookupResult result;
LocalLookupRealNamedProperty(name, &result);
@@ -2126,7 +2136,7 @@ Object* JSObject::DeletePropertyPostInterceptor(String* name) {
// Attempt to remove the property from the property dictionary.
Dictionary* dictionary = property_dictionary();
int entry = dictionary->FindStringEntry(name);
- if (entry != -1) return dictionary->DeleteProperty(entry);
+ if (entry != -1) return dictionary->DeleteProperty(entry, mode);
return Heap::true_value();
}
@@ -2156,13 +2166,15 @@ Object* JSObject::DeletePropertyWithInterceptor(String* name) {
return *v8::Utils::OpenHandle(*result);
}
}
- Object* raw_result = this_handle->DeletePropertyPostInterceptor(*name_handle);
+ Object* raw_result =
+ this_handle->DeletePropertyPostInterceptor(*name_handle, NORMAL_DELETION);
RETURN_IF_SCHEDULED_EXCEPTION();
return raw_result;
}
-Object* JSObject::DeleteElementPostInterceptor(uint32_t index) {
+Object* JSObject::DeleteElementPostInterceptor(uint32_t index,
+ DeleteMode mode) {
if (HasFastElements()) {
uint32_t length = IsJSArray() ?
static_cast<uint32_t>(Smi::cast(JSArray::cast(this)->length())->value()) :
@@ -2175,7 +2187,7 @@ Object* JSObject::DeleteElementPostInterceptor(uint32_t index) {
ASSERT(!HasFastElements());
Dictionary* dictionary = element_dictionary();
int entry = dictionary->FindNumberEntry(index);
- if (entry != -1) return dictionary->DeleteProperty(entry);
+ if (entry != -1) return dictionary->DeleteProperty(entry, mode);
return Heap::true_value();
}
@@ -2206,13 +2218,14 @@ Object* JSObject::DeleteElementWithInterceptor(uint32_t index) {
ASSERT(result->IsBoolean());
return *v8::Utils::OpenHandle(*result);
}
- Object* raw_result = this_handle->DeleteElementPostInterceptor(index);
+ Object* raw_result =
+ this_handle->DeleteElementPostInterceptor(index, NORMAL_DELETION);
RETURN_IF_SCHEDULED_EXCEPTION();
return raw_result;
}
-Object* JSObject::DeleteElement(uint32_t index) {
+Object* JSObject::DeleteElement(uint32_t index, DeleteMode mode) {
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
!Top::MayIndexedAccess(this, index, v8::ACCESS_DELETE)) {
@@ -2224,10 +2237,14 @@ Object* JSObject::DeleteElement(uint32_t index) {
Object* proto = GetPrototype();
if (proto->IsNull()) return Heap::false_value();
ASSERT(proto->IsJSGlobalObject());
- return JSGlobalObject::cast(proto)->DeleteElement(index);
+ return JSGlobalObject::cast(proto)->DeleteElement(index, mode);
}
if (HasIndexedInterceptor()) {
+ // Skip interceptor if forcing deletion.
+ if (mode == FORCE_DELETION) {
+ return DeleteElementPostInterceptor(index, mode);
+ }
return DeleteElementWithInterceptor(index);
}
@@ -2242,13 +2259,13 @@ Object* JSObject::DeleteElement(uint32_t index) {
} else {
Dictionary* dictionary = element_dictionary();
int entry = dictionary->FindNumberEntry(index);
- if (entry != -1) return dictionary->DeleteProperty(entry);
+ if (entry != -1) return dictionary->DeleteProperty(entry, mode);
}
return Heap::true_value();
}
-Object* JSObject::DeleteProperty(String* name) {
+Object* JSObject::DeleteProperty(String* name, DeleteMode mode) {
// ECMA-262, 3rd, 8.6.2.5
ASSERT(name->IsString());
@@ -2263,23 +2280,32 @@ Object* JSObject::DeleteProperty(String* name) {
Object* proto = GetPrototype();
if (proto->IsNull()) return Heap::false_value();
ASSERT(proto->IsJSGlobalObject());
- return JSGlobalObject::cast(proto)->DeleteProperty(name);
+ return JSGlobalObject::cast(proto)->DeleteProperty(name, mode);
}
uint32_t index = 0;
if (name->AsArrayIndex(&index)) {
- return DeleteElement(index);
+ return DeleteElement(index, mode);
} else {
LookupResult result;
LocalLookup(name, &result);
if (!result.IsValid()) return Heap::true_value();
- if (result.IsDontDelete()) return Heap::false_value();
+ // Ignore attributes if forcing a deletion.
+ if (result.IsDontDelete() && mode != FORCE_DELETION) {
+ return Heap::false_value();
+ }
// Check for interceptor.
if (result.type() == INTERCEPTOR) {
+ // Skip interceptor if forcing a deletion.
+ if (mode == FORCE_DELETION) {
+ return DeletePropertyPostInterceptor(name, mode);
+ }
return DeletePropertyWithInterceptor(name);
}
if (!result.IsLoaded()) {
- return JSObject::cast(this)->DeleteLazyProperty(&result, name);
+ return JSObject::cast(this)->DeleteLazyProperty(&result,
+ name,
+ mode);
}
// Normalize object if needed.
Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES);
@@ -2287,7 +2313,7 @@ Object* JSObject::DeleteProperty(String* name) {
// Make sure the properties are normalized before removing the entry.
Dictionary* dictionary = property_dictionary();
int entry = dictionary->FindStringEntry(name);
- if (entry != -1) return dictionary->DeleteProperty(entry);
+ if (entry != -1) return dictionary->DeleteProperty(entry, mode);
return Heap::true_value();
}
}
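The DeleteMode parameter threaded through these hunks gives every deletion path a forced variant: FORCE_DELETION ignores DONT_DELETE attributes and bypasses interceptors, while NORMAL_DELETION preserves the old behavior. A condensed sketch of that control flow, with simplified stand-in types (not the real JSObject API):

enum DeleteMode { NORMAL_DELETION, FORCE_DELETION };

struct Property {
  bool dont_delete;      // DONT_DELETE attribute set
  bool has_interceptor;  // named interceptor present
};

bool Delete(Property* p, DeleteMode mode) {
  if (p->dont_delete && mode != FORCE_DELETION) {
    return false;  // deletion refused, as before
  }
  if (p->has_interceptor && mode != FORCE_DELETION) {
    // would route through DeletePropertyWithInterceptor here
  }
  return true;  // entry removed from the (imaginary) dictionary
}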
@@ -3433,8 +3459,8 @@ const uc16* SeqTwoByteString::SeqTwoByteStringGetData(unsigned start) {
void SeqTwoByteString::SeqTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
- unsigned* offset_ptr,
- unsigned max_chars) {
+ unsigned* offset_ptr,
+ unsigned max_chars) {
unsigned chars_read = 0;
unsigned offset = *offset_ptr;
while (chars_read < max_chars) {
@@ -4852,7 +4878,6 @@ const char* Code::Kind2String(Kind kind) {
const char* Code::ICState2String(InlineCacheState state) {
switch (state) {
case UNINITIALIZED: return "UNINITIALIZED";
- case UNINITIALIZED_IN_LOOP: return "UNINITIALIZED_IN_LOOP";
case PREMONOMORPHIC: return "PREMONOMORPHIC";
case MONOMORPHIC: return "MONOMORPHIC";
case MONOMORPHIC_PROTOTYPE_FAILURE: return "MONOMORPHIC_PROTOTYPE_FAILURE";
@@ -5145,42 +5170,6 @@ bool JSObject::HasLocalElement(uint32_t index) {
}
-Object* JSObject::GetHiddenProperties(bool create_if_needed) {
- String* key = Heap::hidden_symbol();
- if (this->HasFastProperties()) {
- // If the object has fast properties, check whether the first slot
- // in the descriptor array matches the hidden symbol. Since the
- // hidden symbols hash code is zero (and no other string has hash
- // code zero) it will always occupy the first entry if present.
- DescriptorArray* descriptors = this->map()->instance_descriptors();
- DescriptorReader r(descriptors);
- if (!r.eos() && (r.GetKey() == key) && r.IsProperty()) {
- ASSERT(r.type() == FIELD);
- return FastPropertyAt(r.GetFieldIndex());
- }
- }
-
- // Only attempt to find the hidden properties in the local object and not
- // in the prototype chain. Note that HasLocalProperty() can cause a GC in
- // the general case, but in this case we know it won't hit an interceptor.
- if (!this->HasLocalProperty(key)) {
- // Hidden properties object not found. Allocate a new hidden properties
- // object if requested. Otherwise return the undefined value.
- if (create_if_needed) {
- Object* obj = Heap::AllocateJSObject(
- Top::context()->global_context()->object_function());
- if (obj->IsFailure()) {
- return obj;
- }
- return this->SetProperty(key, obj, DONT_ENUM);
- } else {
- return Heap::undefined_value();
- }
- }
- return this->GetProperty(key);
-}
-
-
bool JSObject::HasElementWithReceiver(JSObject* receiver, uint32_t index) {
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
@@ -5646,9 +5635,11 @@ Object* JSObject::GetPropertyPostInterceptor(JSObject* receiver,
}
-Object* JSObject::GetPropertyWithInterceptor(JSObject* receiver,
- String* name,
- PropertyAttributes* attributes) {
+bool JSObject::GetPropertyWithInterceptorProper(
+ JSObject* receiver,
+ String* name,
+ PropertyAttributes* attributes,
+ Object** result_object) {
HandleScope scope;
Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
Handle<JSObject> receiver_handle(receiver);
@@ -5669,19 +5660,93 @@ Object* JSObject::GetPropertyWithInterceptor(JSObject* receiver,
VMState state(EXTERNAL);
result = getter(v8::Utils::ToLocal(name_handle), info);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
+ if (Top::has_scheduled_exception()) {
+ return false;
+ }
if (!result.IsEmpty()) {
*attributes = NONE;
- return *v8::Utils::OpenHandle(*result);
+ *result_object = *v8::Utils::OpenHandle(*result);
+ return true;
}
}
- Object* raw_result = holder_handle->GetPropertyPostInterceptor(
+ return false;
+}
+
+
+Object* JSObject::GetInterceptorPropertyWithLookupHint(
+ JSObject* receiver,
+ Smi* lookup_hint,
+ String* name,
+ PropertyAttributes* attributes) {
+ HandleScope scope;
+ Handle<JSObject> receiver_handle(receiver);
+ Handle<JSObject> holder_handle(this);
+ Handle<String> name_handle(name);
+
+ Object* result = NULL;
+ if (GetPropertyWithInterceptorProper(receiver, name, attributes, &result)) {
+ return result;
+ } else {
+ RETURN_IF_SCHEDULED_EXCEPTION();
+ }
+
+ int property_index = lookup_hint->value();
+ if (property_index >= 0) {
+ result = holder_handle->FastPropertyAt(property_index);
+ } else {
+ switch (property_index) {
+ case kLookupInPrototype: {
+ Object* pt = holder_handle->GetPrototype();
+ *attributes = ABSENT;
+ if (pt == Heap::null_value()) return Heap::undefined_value();
+ result = pt->GetPropertyWithReceiver(
+ *receiver_handle,
+ *name_handle,
+ attributes);
+ RETURN_IF_SCHEDULED_EXCEPTION();
+ }
+ break;
+
+ case kLookupInHolder:
+ result = holder_handle->GetPropertyPostInterceptor(
+ *receiver_handle,
+ *name_handle,
+ attributes);
+ RETURN_IF_SCHEDULED_EXCEPTION();
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ return result;
+}
+
+
+Object* JSObject::GetPropertyWithInterceptor(
+ JSObject* receiver,
+ String* name,
+ PropertyAttributes* attributes) {
+ HandleScope scope;
+ Handle<JSObject> receiver_handle(receiver);
+ Handle<JSObject> holder_handle(this);
+ Handle<String> name_handle(name);
+
+ Object* result = NULL;
+ if (GetPropertyWithInterceptorProper(receiver, name, attributes, &result)) {
+ return result;
+ } else {
+ RETURN_IF_SCHEDULED_EXCEPTION();
+ }
+
+ result = holder_handle->GetPropertyPostInterceptor(
*receiver_handle,
*name_handle,
attributes);
RETURN_IF_SCHEDULED_EXCEPTION();
- return raw_result;
+ return result;
}
@@ -5968,20 +6033,6 @@ int JSObject::GetEnumElementKeys(FixedArray* storage) {
}
-// Thomas Wang, Integer Hash Functions.
-// http://www.concentric.net/~Ttwang/tech/inthash.htm
-static uint32_t ComputeIntegerHash(uint32_t key) {
- uint32_t hash = key;
- hash = ~hash + (hash << 15); // hash = (hash << 15) - hash - 1;
- hash = hash ^ (hash >> 12);
- hash = hash + (hash << 2);
- hash = hash ^ (hash >> 4);
- hash = hash * 2057; // hash = (hash + (hash << 3)) + (hash << 11);
- hash = hash ^ (hash >> 16);
- return hash;
-}
-
-
// The NumberKey carries the uint32_t as its key.
// This avoids allocation in HasProperty.
class NumberKey : public HashTableKey {
@@ -6725,7 +6776,10 @@ class MapNameKey : public HashTableKey {
virtual HashFunction GetHashFunction() { return MapNameHash; }
static uint32_t MapNameHashHelper(Map* map, String* name) {
- return reinterpret_cast<uint32_t>(map) ^ name->Hash();
+ // Uses only lower 32 bits if pointers are larger.
+ uintptr_t addr_hash =
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
+ return addr_hash ^ name->Hash();
}
static uint32_t MapNameHash(Object* obj) {
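MapNameHashHelper's rewrite makes the pointer hash 64-bit clean: the map address is widened to uintptr_t first and only its low 32 bits feed the hash, instead of casting a pointer straight to uint32_t. A stand-alone sketch of the same computation:

#include <stdint.h>

uint32_t PointerNameHash(const void* map, uint32_t name_hash) {
  // Well-defined on both 32- and 64-bit targets: widen, then truncate.
  uint32_t addr_hash =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
  return addr_hash ^ name_hash;
}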
@@ -6907,9 +6961,12 @@ void Dictionary::RemoveNumberEntries(uint32_t from, uint32_t to) {
}
-Object* Dictionary::DeleteProperty(int entry) {
+Object* Dictionary::DeleteProperty(int entry, JSObject::DeleteMode mode) {
PropertyDetails details = DetailsAt(entry);
- if (details.IsDontDelete()) return Heap::false_value();
+ // Ignore attributes if forcing a deletion.
+ if (details.IsDontDelete() && mode == JSObject::NORMAL_DELETION) {
+ return Heap::false_value();
+ }
SetEntry(entry, Heap::null_value(), Heap::null_value(), Smi::FromInt(0));
ElementRemoved();
return Heap::true_value();
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 3e132ff19..493d22b41 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -50,7 +50,6 @@
// - JSBuiltinsObject
// - JSGlobalProxy
// - JSValue
-// - Script
// - Array
// - ByteArray
// - FixedArray
@@ -83,8 +82,10 @@
// - AccessCheckInfo
// - InterceptorInfo
// - CallHandlerInfo
-// - FunctionTemplateInfo
-// - ObjectTemplateInfo
+// - TemplateInfo
+// - FunctionTemplateInfo
+// - ObjectTemplateInfo
+// - Script
// - SignatureInfo
// - TypeSwitchInfo
// - DebugInfo
@@ -108,7 +109,8 @@ enum PropertyAttributes {
// a non-existent property.
};
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// PropertyDetails captures type and attributes for a property.
@@ -295,12 +297,14 @@ enum PropertyNormalizationMode {
// Since string types are not consecutive, this macro is used to
// iterate over them.
#define STRING_TYPE_LIST(V) \
- V(SHORT_SYMBOL_TYPE, SeqTwoByteString::kHeaderSize, short_symbol) \
- V(MEDIUM_SYMBOL_TYPE, SeqTwoByteString::kHeaderSize, medium_symbol) \
- V(LONG_SYMBOL_TYPE, SeqTwoByteString::kHeaderSize, long_symbol) \
- V(SHORT_ASCII_SYMBOL_TYPE, SeqAsciiString::kHeaderSize, short_ascii_symbol) \
- V(MEDIUM_ASCII_SYMBOL_TYPE, SeqAsciiString::kHeaderSize, medium_ascii_symbol)\
- V(LONG_ASCII_SYMBOL_TYPE, SeqAsciiString::kHeaderSize, long_ascii_symbol) \
+ V(SHORT_SYMBOL_TYPE, SeqTwoByteString::kAlignedSize, short_symbol) \
+ V(MEDIUM_SYMBOL_TYPE, SeqTwoByteString::kAlignedSize, medium_symbol) \
+ V(LONG_SYMBOL_TYPE, SeqTwoByteString::kAlignedSize, long_symbol) \
+ V(SHORT_ASCII_SYMBOL_TYPE, SeqAsciiString::kAlignedSize, short_ascii_symbol) \
+ V(MEDIUM_ASCII_SYMBOL_TYPE, \
+ SeqAsciiString::kAlignedSize, \
+ medium_ascii_symbol) \
+ V(LONG_ASCII_SYMBOL_TYPE, SeqAsciiString::kAlignedSize, long_ascii_symbol) \
V(SHORT_CONS_SYMBOL_TYPE, ConsString::kSize, short_cons_symbol) \
V(MEDIUM_CONS_SYMBOL_TYPE, ConsString::kSize, medium_cons_symbol) \
V(LONG_CONS_SYMBOL_TYPE, ConsString::kSize, long_cons_symbol) \
@@ -337,12 +341,14 @@ enum PropertyNormalizationMode {
V(LONG_EXTERNAL_ASCII_SYMBOL_TYPE, \
ExternalAsciiString::kSize, \
long_external_ascii_symbol) \
- V(SHORT_STRING_TYPE, SeqTwoByteString::kHeaderSize, short_string) \
- V(MEDIUM_STRING_TYPE, SeqTwoByteString::kHeaderSize, medium_string) \
- V(LONG_STRING_TYPE, SeqTwoByteString::kHeaderSize, long_string) \
- V(SHORT_ASCII_STRING_TYPE, SeqAsciiString::kHeaderSize, short_ascii_string) \
- V(MEDIUM_ASCII_STRING_TYPE, SeqAsciiString::kHeaderSize, medium_ascii_string)\
- V(LONG_ASCII_STRING_TYPE, SeqAsciiString::kHeaderSize, long_ascii_string) \
+ V(SHORT_STRING_TYPE, SeqTwoByteString::kAlignedSize, short_string) \
+ V(MEDIUM_STRING_TYPE, SeqTwoByteString::kAlignedSize, medium_string) \
+ V(LONG_STRING_TYPE, SeqTwoByteString::kAlignedSize, long_string) \
+ V(SHORT_ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize, short_ascii_string) \
+ V(MEDIUM_ASCII_STRING_TYPE, \
+ SeqAsciiString::kAlignedSize, \
+ medium_ascii_string) \
+ V(LONG_ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize, long_ascii_string) \
V(SHORT_CONS_STRING_TYPE, ConsString::kSize, short_cons_string) \
V(MEDIUM_CONS_STRING_TYPE, ConsString::kSize, medium_cons_string) \
V(LONG_CONS_STRING_TYPE, ConsString::kSize, long_cons_string) \
@@ -771,8 +777,10 @@ class Object BASE_EMBEDDED {
// Smi represents integer Numbers that can be stored in 31 bits.
+// TODO(X64): Increase to 53 bits?
// Smis are immediate which means they are NOT allocated in the heap.
// The this pointer has the following format: [31 bit signed int] 0
+// TODO(X64): 31 bits signed int sign-extended to 63 bits.
// Smi stands for small integer.
class Smi: public Object {
public:
@@ -1267,9 +1275,12 @@ class JSObject: public HeapObject {
return GetLocalPropertyAttribute(name) != ABSENT;
}
- Object* DeleteProperty(String* name);
- Object* DeleteElement(uint32_t index);
- Object* DeleteLazyProperty(LookupResult* result, String* name);
+ enum DeleteMode { NORMAL_DELETION, FORCE_DELETION };
+ Object* DeleteProperty(String* name, DeleteMode mode);
+ Object* DeleteElement(uint32_t index, DeleteMode mode);
+ Object* DeleteLazyProperty(LookupResult* result,
+ String* name,
+ DeleteMode mode);
// Tests for the fast common case for property enumeration.
bool IsSimpleEnum();
@@ -1286,11 +1297,6 @@ class JSObject: public HeapObject {
// Return the object's prototype (might be Heap::null_value()).
inline Object* GetPrototype();
- // Return the object's hidden properties object. If the object has no hidden
- // properties and create_if_needed is true, then a new hidden property object
- // will be allocated. Otherwise the Heap::undefined_value is returned.
- Object* GetHiddenProperties(bool create_if_needed);
-
// Tells whether the index'th element is present.
inline bool HasElement(uint32_t index);
bool HasElementWithReceiver(JSObject* receiver, uint32_t index);
@@ -1346,6 +1352,14 @@ class JSObject: public HeapObject {
Object* LookupCallbackSetterInPrototypes(uint32_t index);
void LookupCallback(String* name, LookupResult* result);
+ inline Smi* InterceptorPropertyLookupHint(String* name);
+ Object* GetInterceptorPropertyWithLookupHint(JSObject* receiver,
+ Smi* lookup_hint,
+ String* name,
+ PropertyAttributes* attributes);
+ static const int kLookupInHolder = -1;
+ static const int kLookupInPrototype = -2;
+
// Returns the number of properties on this object filtering out properties
// with the specified attributes (ignoring interceptors).
int NumberOfLocalProperties(PropertyAttributes filter);
@@ -1508,10 +1522,10 @@ class JSObject: public HeapObject {
Object* GetElementPostInterceptor(JSObject* receiver, uint32_t index);
- Object* DeletePropertyPostInterceptor(String* name);
+ Object* DeletePropertyPostInterceptor(String* name, DeleteMode mode);
Object* DeletePropertyWithInterceptor(String* name);
- Object* DeleteElementPostInterceptor(uint32_t index);
+ Object* DeleteElementPostInterceptor(uint32_t index, DeleteMode mode);
Object* DeleteElementWithInterceptor(uint32_t index);
PropertyAttributes GetPropertyAttributePostInterceptor(JSObject* receiver,
@@ -1537,6 +1551,14 @@ class JSObject: public HeapObject {
void LookupInDescriptor(String* name, LookupResult* result);
+ // Attempts to get property with a named interceptor getter. Returns
+ // |true| and stores result into |result| if succesful, otherwise
+ // returns |false|
+ bool GetPropertyWithInterceptorProper(JSObject* receiver,
+ String* name,
+ PropertyAttributes* attributes,
+ Object** result);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject);
};
@@ -1555,6 +1577,7 @@ class Array: public HeapObject {
// Layout descriptor.
static const int kLengthOffset = HeapObject::kHeaderSize;
static const int kHeaderSize = kLengthOffset + kIntSize;
+ static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Array);
@@ -1601,6 +1624,9 @@ class FixedArray: public Array {
// Casting.
static inline FixedArray* cast(Object* obj);
+ // Align data at kPointerSize, even if Array.kHeaderSize isn't aligned.
+ static const int kHeaderSize = POINTER_SIZE_ALIGN(Array::kHeaderSize);
+
// Dispatched behavior.
int FixedArraySize() { return SizeFor(length()); }
void FixedArrayIterateBody(ObjectVisitor* v);
@@ -2034,7 +2060,7 @@ class Dictionary: public DictionaryBase {
int FindNumberEntry(uint32_t index);
// Delete a property from the dictionary.
- Object* DeleteProperty(int entry);
+ Object* DeleteProperty(int entry, JSObject::DeleteMode mode);
// Type specific at put (default NONE attributes is used when adding).
Object* AtStringPut(String* key, Object* value);
@@ -2152,7 +2178,7 @@ class ByteArray: public Array {
inline int get_int(int index);
static int SizeFor(int length) {
- return kHeaderSize + OBJECT_SIZE_ALIGN(length);
+ return OBJECT_SIZE_ALIGN(kHeaderSize + length);
}
// We use byte arrays for free blocks in the heap. Given a desired size in
// bytes that is a multiple of the word size and big enough to hold a byte
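The SizeFor change is subtle but load-bearing: aligning only the payload leaves the total unaligned whenever kHeaderSize itself is not a multiple of the alignment, which is exactly the case the new kAlignedSize constants account for. A worked example, assuming 4-byte alignment and a hypothetical 6-byte header:

#include <assert.h>

// OBJECT_SIZE_ALIGN stand-in: round up to a multiple of kAlign.
const int kAlign = 4;
int Align(int x) { return (x + kAlign - 1) & ~(kAlign - 1); }

int main() {
  int header = 6;  // hypothetical unaligned header size
  int length = 3;
  assert(header + Align(length) == 10);  // old formula: total unaligned
  assert(Align(header + length) == 12);  // new formula: total aligned
  return 0;
}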
@@ -2246,9 +2272,10 @@ class Code: public HeapObject {
// [flags]: Access to specific code flags.
inline Kind kind();
- inline InlineCacheState ic_state(); // only valid for IC stubs
- inline PropertyType type(); // only valid for monomorphic IC stubs
- inline int arguments_count(); // only valid for call IC stubs
+ inline InlineCacheState ic_state(); // Only valid for IC stubs.
+ inline InLoopFlag ic_in_loop(); // Only valid for IC stubs.
+ inline PropertyType type(); // Only valid for monomorphic IC stubs.
+ inline int arguments_count(); // Only valid for call IC stubs.
// Testers for IC stub kinds.
inline bool is_inline_cache_stub();
@@ -2270,16 +2297,20 @@ class Code: public HeapObject {
// Flags operations.
static inline Flags ComputeFlags(Kind kind,
+ InLoopFlag in_loop = NOT_IN_LOOP,
InlineCacheState ic_state = UNINITIALIZED,
PropertyType type = NORMAL,
int argc = -1);
- static inline Flags ComputeMonomorphicFlags(Kind kind,
- PropertyType type,
- int argc = -1);
+ static inline Flags ComputeMonomorphicFlags(
+ Kind kind,
+ PropertyType type,
+ InLoopFlag in_loop = NOT_IN_LOOP,
+ int argc = -1);
static inline Kind ExtractKindFromFlags(Flags flags);
static inline InlineCacheState ExtractICStateFromFlags(Flags flags);
+ static inline InLoopFlag ExtractICInLoopFromFlags(Flags flags);
static inline PropertyType ExtractTypeFromFlags(Flags flags);
static inline int ExtractArgumentsCountFromFlags(Flags flags);
static inline Flags RemoveTypeFromFlags(Flags flags);
@@ -2349,6 +2380,9 @@ class Code: public HeapObject {
void CodePrint();
void CodeVerify();
#endif
+ // Code entry points are aligned to 32 bytes.
+ static const int kCodeAlignment = 32;
+ static const int kCodeAlignmentMask = kCodeAlignment - 1;
// Layout description.
static const int kInstructionSizeOffset = HeapObject::kHeaderSize;
@@ -2356,14 +2390,11 @@ class Code: public HeapObject {
static const int kSInfoSizeOffset = kRelocationSizeOffset + kIntSize;
static const int kFlagsOffset = kSInfoSizeOffset + kIntSize;
static const int kKindSpecificFlagsOffset = kFlagsOffset + kIntSize;
- // Add filler objects to align the instruction start following right after
+ // Add padding to align the instruction start, which follows right after
// the Code object header.
- static const int kFiller6Offset = kKindSpecificFlagsOffset + kIntSize;
- static const int kFiller7Offset = kFiller6Offset + kIntSize;
- static const int kHeaderSize = kFiller7Offset + kIntSize;
-
- // Code entry points are aligned to 32 bytes.
- static const int kCodeAlignment = 32;
+ static const int kHeaderSize =
+ (kKindSpecificFlagsOffset + kIntSize + kCodeAlignmentMask) &
+ ~kCodeAlignmentMask;
// Byte offsets within kKindSpecificFlagsOffset.
static const int kICFlagOffset = kKindSpecificFlagsOffset + 0;
@@ -2371,14 +2402,19 @@ class Code: public HeapObject {
// Flags layout.
static const int kFlagsICStateShift = 0;
- static const int kFlagsKindShift = 3;
- static const int kFlagsTypeShift = 6;
- static const int kFlagsArgumentsCountShift = 9;
+ static const int kFlagsICInLoopShift = 3;
+ static const int kFlagsKindShift = 4;
+ static const int kFlagsTypeShift = 7;
+ static const int kFlagsArgumentsCountShift = 10;
+
+ static const int kFlagsICStateMask = 0x00000007; // 0000000111
+ static const int kFlagsICInLoopMask = 0x00000008; // 0000001000
+ static const int kFlagsKindMask = 0x00000070; // 0001110000
+ static const int kFlagsTypeMask = 0x00000380; // 1110000000
+ static const int kFlagsArgumentsCountMask = 0xFFFFFC00;
- static const int kFlagsICStateMask = 0x00000007; // 000000111
- static const int kFlagsKindMask = 0x00000038; // 000111000
- static const int kFlagsTypeMask = 0x000001C0; // 111000000
- static const int kFlagsArgumentsCountMask = 0xFFFFFE00;
+ static const int kFlagsNotUsedInLookup =
+ (kFlagsICInLoopMask | kFlagsTypeMask);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
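
Adding the in-loop bit at position 3 shifts the kind, type, and argument-count fields up by one; the masks on the right spell out the packing. A standalone sketch exercising those shifts and masks (the field values are made up for illustration, not V8's actual enum encodings):

#include <cassert>
#include <cstdint>

// Shifts and masks copied from the layout above.
const int kFlagsICStateShift = 0;
const int kFlagsICInLoopShift = 3;
const int kFlagsKindShift = 4;
const int kFlagsTypeShift = 7;
const int kFlagsArgumentsCountShift = 10;

const uint32_t kFlagsICInLoopMask = 0x00000008;
const uint32_t kFlagsKindMask = 0x00000070;
const uint32_t kFlagsTypeMask = 0x00000380;
const uint32_t kFlagsArgumentsCountMask = 0xFFFFFC00;

// Pack the fields the way ComputeFlags would.
static uint32_t PackFlags(uint32_t ic_state, uint32_t in_loop,
                          uint32_t kind, uint32_t type, uint32_t argc) {
  return (ic_state << kFlagsICStateShift) |
         (in_loop << kFlagsICInLoopShift) |
         (kind << kFlagsKindShift) |
         (type << kFlagsTypeShift) |
         (argc << kFlagsArgumentsCountShift);
}

int main() {
  // Made-up field values: ic_state=2, in_loop=1, kind=5, type=3, argc=7.
  uint32_t flags = PackFlags(2, 1, 5, 3, 7);
  // Each Extract*FromFlags above boils down to mask-and-shift:
  assert(((flags & kFlagsICInLoopMask) >> kFlagsICInLoopShift) == 1);
  assert(((flags & kFlagsKindMask) >> kFlagsKindShift) == 5);
  assert(((flags & kFlagsTypeMask) >> kFlagsTypeShift) == 3);
  assert(((flags & kFlagsArgumentsCountMask) >> kFlagsArgumentsCountShift) == 7);
  return 0;
}
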
@@ -2572,7 +2608,7 @@ class Map: public HeapObject {
static const int kInstanceDescriptorsOffset =
kConstructorOffset + kPointerSize;
static const int kCodeCacheOffset = kInstanceDescriptorsOffset + kPointerSize;
- static const int kSize = kCodeCacheOffset + kIntSize;
+ static const int kSize = kCodeCacheOffset + kPointerSize;
// Byte offsets within kInstanceSizesOffset.
static const int kInstanceSizeOffset = kInstanceSizesOffset + 0;
@@ -2595,7 +2631,7 @@ class Map: public HeapObject {
static const int kHasInstanceCallHandler = 6;
static const int kIsAccessCheckNeeded = 7;
- // Bit positions for but field 2
+ // Bit positions for bit field 2
static const int kNeedsLoading = 0;
private:
@@ -2613,17 +2649,23 @@ class Struct: public HeapObject {
};
-// Script types.
-enum ScriptType {
- SCRIPT_TYPE_NATIVE,
- SCRIPT_TYPE_EXTENSION,
- SCRIPT_TYPE_NORMAL
-};
-
-
// Script describes a script which has been added to the VM.
class Script: public Struct {
public:
+ // Script types.
+ enum Type {
+ TYPE_NATIVE,
+ TYPE_EXTENSION,
+ TYPE_NORMAL
+ };
+
+ // Script compilation types.
+ enum CompilationType {
+ COMPILATION_TYPE_HOST,
+ COMPILATION_TYPE_EVAL,
+ COMPILATION_TYPE_JSON
+ };
+
// [source]: the script source.
DECL_ACCESSORS(source, Object)
@@ -2652,9 +2694,20 @@ class Script: public Struct {
// [type]: the script type.
DECL_ACCESSORS(type, Smi)
- // [line_ends]: array of line ends positions
+ // [compilation]: how the script was compiled.
+ DECL_ACCESSORS(compilation_type, Smi)
+
+ // [line_ends]: array of line ends positions.
DECL_ACCESSORS(line_ends, Object)
+ // [eval_from_function]: for eval scripts, the function from which eval was
+ // called.
+ DECL_ACCESSORS(eval_from_function, Object)
+
+ // [eval_from_instructions_offset]: the instruction offset, within the code of
+ // the function from which eval was called, of the eval call site.
+ DECL_ACCESSORS(eval_from_instructions_offset, Smi)
+
static inline Script* cast(Object* obj);
#ifdef DEBUG
@@ -2670,9 +2723,13 @@ class Script: public Struct {
static const int kContextOffset = kDataOffset + kPointerSize;
static const int kWrapperOffset = kContextOffset + kPointerSize;
static const int kTypeOffset = kWrapperOffset + kPointerSize;
- static const int kLineEndsOffset = kTypeOffset + kPointerSize;
+ static const int kCompilationTypeOffset = kTypeOffset + kPointerSize;
+ static const int kLineEndsOffset = kCompilationTypeOffset + kPointerSize;
static const int kIdOffset = kLineEndsOffset + kPointerSize;
- static const int kSize = kIdOffset + kPointerSize;
+ static const int kEvalFromFunctionOffset = kIdOffset + kPointerSize;
+ static const int kEvalFrominstructionsOffsetOffset =
+ kEvalFromFunctionOffset + kPointerSize;
+ static const int kSize = kEvalFrominstructionsOffsetOffset + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Script);
@@ -2781,21 +2838,23 @@ class SharedFunctionInfo: public HeapObject {
static const int kDontAdaptArgumentsSentinel = -1;
// Layout description.
+ // (An even number of int fields occupies a whole multiple of the pointer size.)
static const int kNameOffset = HeapObject::kHeaderSize;
static const int kCodeOffset = kNameOffset + kPointerSize;
static const int kLengthOffset = kCodeOffset + kPointerSize;
static const int kFormalParameterCountOffset = kLengthOffset + kIntSize;
static const int kExpectedNofPropertiesOffset =
kFormalParameterCountOffset + kIntSize;
- static const int kInstanceClassNameOffset =
+ static const int kStartPositionAndTypeOffset =
kExpectedNofPropertiesOffset + kIntSize;
+ static const int kEndPositionOffset = kStartPositionAndTypeOffset + kIntSize;
+ static const int kFunctionTokenPositionOffset = kEndPositionOffset + kIntSize;
+ static const int kInstanceClassNameOffset =
+ kFunctionTokenPositionOffset + kIntSize;
static const int kExternalReferenceDataOffset =
kInstanceClassNameOffset + kPointerSize;
static const int kScriptOffset = kExternalReferenceDataOffset + kPointerSize;
- static const int kStartPositionAndTypeOffset = kScriptOffset + kPointerSize;
- static const int kEndPositionOffset = kStartPositionAndTypeOffset + kIntSize;
- static const int kFunctionTokenPositionOffset = kEndPositionOffset + kIntSize;
- static const int kDebugInfoOffset = kFunctionTokenPositionOffset + kIntSize;
+ static const int kDebugInfoOffset = kScriptOffset + kPointerSize;
static const int kInferredNameOffset = kDebugInfoOffset + kPointerSize;
static const int kSize = kInferredNameOffset + kPointerSize;
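
The motivation for the reordering is the parenthetical above: grouping the int-sized fields into one even-length run keeps every pointer field pointer-aligned on 64-bit targets. A quick check of the arithmetic under assumed sizes (4-byte int, 8-byte pointer, pointer-aligned header):

#include <cassert>

const int kIntSize = 4;
const int kPointerSize = 8;   // assuming a 64-bit target
const int kHeaderSize = 8;    // hypothetical pointer-aligned header

int main() {
  // Two pointer fields (name, code), then the six int fields from
  // length through function_token_position, as laid out above.
  int offset = kHeaderSize;
  offset += 2 * kPointerSize;
  offset += 6 * kIntSize;
  // An even number of ints totals a multiple of the pointer size, so
  // the next pointer field (instance_class_name) lands aligned.
  assert(offset % kPointerSize == 0);
  return 0;
}
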
@@ -3105,7 +3164,7 @@ class JSRegExp: public JSObject {
#endif
static const int kDataOffset = JSObject::kHeaderSize;
- static const int kSize = kDataOffset + kIntSize;
+ static const int kSize = kDataOffset + kPointerSize;
// Indices in the data array.
static const int kTagIndex = 0;
@@ -3370,6 +3429,7 @@ class String: public HeapObject {
// Layout description.
static const int kLengthOffset = HeapObject::kHeaderSize;
static const int kSize = kLengthOffset + kIntSize;
+ // Notice: kSize is not pointer-size aligned if pointers are 64-bit.
// Limits on sizes of different types of strings.
static const int kMaxShortStringSize = 63;
@@ -3518,11 +3578,12 @@ class SeqAsciiString: public SeqString {
// Computes the size for an AsciiString instance of a given length.
static int SizeFor(int length) {
- return kHeaderSize + OBJECT_SIZE_ALIGN(length * kCharSize);
+ return OBJECT_SIZE_ALIGN(kHeaderSize + length * kCharSize);
}
// Layout description.
static const int kHeaderSize = String::kSize;
+ static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
// Support for StringInputBuffer.
inline void SeqAsciiStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
@@ -3563,11 +3624,12 @@ class SeqTwoByteString: public SeqString {
// Computes the size for a TwoByteString instance of a given length.
static int SizeFor(int length) {
- return kHeaderSize + OBJECT_SIZE_ALIGN(length * kShortSize);
+ return OBJECT_SIZE_ALIGN(kHeaderSize + length * kShortSize);
}
// Layout description.
static const int kHeaderSize = String::kSize;
+ static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
// Support for StringInputBuffer.
inline void SeqTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
@@ -3617,7 +3679,7 @@ class ConsString: public String {
void ConsStringIterateBody(ObjectVisitor* v);
// Layout description.
- static const int kFirstOffset = String::kSize;
+ static const int kFirstOffset = POINTER_SIZE_ALIGN(String::kSize);
static const int kSecondOffset = kFirstOffset + kPointerSize;
static const int kSize = kSecondOffset + kPointerSize;
@@ -3661,9 +3723,18 @@ class SlicedString: public String {
void SlicedStringIterateBody(ObjectVisitor* v);
// Layout description
+#if V8_HOST_ARCH_64_BIT
+ // Optimizations expect the buffer to be located at the same offset as a
+ // ConsString's first substring. In 64-bit mode there is room for the
+ // int-sized start field before the buffer.
+ static const int kStartOffset = String::kSize;
+ static const int kBufferOffset = kStartOffset + kIntSize;
+ static const int kSize = kBufferOffset + kPointerSize;
+#else
static const int kBufferOffset = String::kSize;
static const int kStartOffset = kBufferOffset + kPointerSize;
static const int kSize = kStartOffset + kIntSize;
+#endif
// Support for StringInputBuffer.
inline const unibrow::byte* SlicedStringReadBlock(ReadBlockBuffer* buffer,
@@ -3693,7 +3764,7 @@ class ExternalString: public String {
static inline ExternalString* cast(Object* obj);
// Layout description.
- static const int kResourceOffset = String::kSize;
+ static const int kResourceOffset = POINTER_SIZE_ALIGN(String::kSize);
static const int kSize = kResourceOffset + kPointerSize;
private:
@@ -4153,7 +4224,7 @@ class ObjectTemplateInfo: public TemplateInfo {
static const int kConstructorOffset = TemplateInfo::kHeaderSize;
static const int kInternalFieldCountOffset =
kConstructorOffset + kPointerSize;
- static const int kSize = kInternalFieldCountOffset + kHeaderSize;
+ static const int kSize = kInternalFieldCountOffset + kPointerSize;
};
diff --git a/deps/v8/src/oprofile-agent.cc b/deps/v8/src/oprofile-agent.cc
index e9f7d3e2f..c4595b40a 100644
--- a/deps/v8/src/oprofile-agent.cc
+++ b/deps/v8/src/oprofile-agent.cc
@@ -29,7 +29,8 @@
#include "oprofile-agent.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
#ifdef ENABLE_OPROFILE_AGENT
op_agent_t OProfileAgent::handle_ = NULL;
diff --git a/deps/v8/src/oprofile-agent.h b/deps/v8/src/oprofile-agent.h
index 75cfe18a2..4c299bfbc 100644
--- a/deps/v8/src/oprofile-agent.h
+++ b/deps/v8/src/oprofile-agent.h
@@ -39,7 +39,8 @@
#include <opagent.h> // NOLINT
#endif
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
class OProfileAgent {
public:
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index 9db10cf2c..271c3fd16 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -30,20 +30,64 @@
#include "api.h"
#include "ast.h"
#include "bootstrapper.h"
+#include "compiler.h"
#include "platform.h"
#include "runtime.h"
#include "parser.h"
#include "scopes.h"
#include "string-stream.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
class ParserFactory;
class ParserLog;
class TemporaryScope;
+class Target;
+
template <typename T> class ZoneListWrapper;
+// PositionStack is used for on-stack allocation of token positions for
+// new expressions. See ParseNewExpression.
+
+class PositionStack {
+ public:
+ explicit PositionStack(bool* ok) : top_(NULL), ok_(ok) {}
+ ~PositionStack() { ASSERT(!*ok_ || is_empty()); }
+
+ class Element {
+ public:
+ Element(PositionStack* stack, int value) {
+ previous_ = stack->top();
+ value_ = value;
+ stack->set_top(this);
+ }
+
+ private:
+ Element* previous() { return previous_; }
+ int value() { return value_; }
+ friend class PositionStack;
+ Element* previous_;
+ int value_;
+ };
+
+ bool is_empty() { return top_ == NULL; }
+ int pop() {
+ ASSERT(!is_empty());
+ int result = top_->value();
+ top_ = top_->previous();
+ return result;
+ }
+
+ private:
+ Element* top() { return top_; }
+ void set_top(Element* value) { top_ = value; }
+ Element* top_;
+ bool* ok_;
+};
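
Since each Element lives in the C++ activation frame that creates it, the stack needs no heap allocation at all. A compilable sketch of the same mechanics (V8's ASSERT swapped for the standard assert, Element members made public to skip the friend boilerplate, and recursion depth standing in for nested ParseNewPrefix calls):

#include <cassert>
#include <cstdio>

class PositionStack {
 public:
  explicit PositionStack(bool* ok) : top_(nullptr), ok_(ok) {}
  ~PositionStack() { assert(!*ok_ || is_empty()); }

  class Element {
   public:
    Element(PositionStack* stack, int value)
        : previous_(stack->top_), value_(value) {
      stack->top_ = this;  // push: link in front of the current top
    }
    Element* previous_;
    int value_;
  };

  bool is_empty() { return top_ == nullptr; }
  int pop() {
    assert(!is_empty());
    int result = top_->value_;
    top_ = top_->previous_;
    return result;
  }

 private:
  Element* top_;
  bool* ok_;
};

// Each recursive call owns one stack-allocated Element, mirroring one
// consumed 'new' token per ParseNewPrefix activation.
static void Recurse(PositionStack* stack, int depth) {
  if (depth == 0) return;
  PositionStack::Element pos(stack, depth);  // lives on the C++ stack
  Recurse(stack, depth - 1);
  printf("popped %d\n", stack->pop());
}

int main() {
  bool ok = true;
  PositionStack stack(&ok);
  Recurse(&stack, 3);  // prints "popped 1", "popped 2", "popped 3"
  return 0;
}
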
+
+
class Parser {
public:
Parser(Handle<Script> script, bool allow_natives_syntax,
@@ -92,7 +136,8 @@ class Parser {
TemporaryScope* temp_scope_;
Mode mode_;
- List<Node*>* target_stack_; // for break, continue statements
+
+ Target* target_stack_; // for break, continue statements
bool allow_natives_syntax_;
v8::Extension* extension_;
ParserFactory* factory_;
@@ -149,7 +194,8 @@ class Parser {
Expression* ParseLeftHandSideExpression(bool* ok);
Expression* ParseNewExpression(bool* ok);
Expression* ParseMemberExpression(bool* ok);
- Expression* ParseMemberWithNewPrefixesExpression(List<int>* new_prefixes,
+ Expression* ParseNewPrefix(PositionStack* stack, bool* ok);
+ Expression* ParseMemberWithNewPrefixesExpression(PositionStack* stack,
bool* ok);
Expression* ParsePrimaryExpression(bool* ok);
Expression* ParseArrayLiteral(bool* ok);
@@ -207,7 +253,7 @@ class Parser {
BreakableStatement* LookupBreakTarget(Handle<String> label, bool* ok);
IterationStatement* LookupContinueTarget(Handle<String> label, bool* ok);
- void RegisterTargetUse(BreakTarget* target, int index);
+ void RegisterTargetUse(BreakTarget* target, Target* stop);
// Create a number literal.
Literal* NewNumberLiteral(double value);
@@ -970,35 +1016,39 @@ VariableProxy* PreParser::Declare(Handle<String> name, Variable::Mode mode,
class Target BASE_EMBEDDED {
public:
- Target(Parser* parser, Node* node) : parser_(parser) {
- parser_->target_stack_->Add(node);
+ Target(Parser* parser, Node* node)
+ : parser_(parser), node_(node), previous_(parser_->target_stack_) {
+ parser_->target_stack_ = this;
}
~Target() {
- parser_->target_stack_->RemoveLast();
+ parser_->target_stack_ = previous_;
}
+ Target* previous() { return previous_; }
+ Node* node() { return node_; }
+
private:
Parser* parser_;
+ Node* node_;
+ Target* previous_;
};
class TargetScope BASE_EMBEDDED {
public:
explicit TargetScope(Parser* parser)
- : parser_(parser), previous_(parser->target_stack_), stack_(0) {
- parser_->target_stack_ = &stack_;
+ : parser_(parser), previous_(parser->target_stack_) {
+ parser->target_stack_ = NULL;
}
~TargetScope() {
- ASSERT(stack_.is_empty());
parser_->target_stack_ = previous_;
}
private:
Parser* parser_;
- List<Node*>* previous_;
- List<Node*> stack_;
+ Target* previous_;
};
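
The new Target replaces a heap-allocated List with an intrusive, RAII-managed linked stack: the constructor pushes and the destructor pops, so unwinding the C++ stack unwinds the target stack with it. A hedged sketch of the pattern outside the parser (the global stands in for parser_->target_stack_):

#include <cassert>
#include <cstdio>

struct Node { const char* label; };

struct Target;
static Target* target_stack = nullptr;  // stand-in for parser state

struct Target {
  explicit Target(Node* node) : node_(node), previous_(target_stack) {
    target_stack = this;                // push on construction
  }
  ~Target() { target_stack = previous_; }  // pop on destruction

  Node* node_;
  Target* previous_;
};

static void Inner() {
  Node loop = {"loop"};
  Target t(&loop);
  // Walk from innermost to outermost, like TargetStackContainsLabel.
  for (Target* cur = target_stack; cur != nullptr; cur = cur->previous_)
    printf("%s\n", cur->node_->label);
}

int main() {
  Node fn = {"function"};
  Target t(&fn);
  Inner();                     // prints "loop" then "function"
  assert(target_stack == &t);  // Inner's Target popped itself on exit
  return 0;
}
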
@@ -1096,7 +1146,7 @@ bool Parser::PreParseProgram(unibrow::CharacterStream* stream) {
FunctionLiteral* Parser::ParseProgram(Handle<String> source,
unibrow::CharacterStream* stream,
bool in_global_context) {
- ZoneScope zone_scope(DONT_DELETE_ON_EXIT);
+ CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
HistogramTimerScope timer(&Counters::parse);
Counters::total_parse_size.Increment(source->length());
@@ -1149,7 +1199,7 @@ FunctionLiteral* Parser::ParseLazy(Handle<String> source,
Handle<String> name,
int start_position,
bool is_expression) {
- ZoneScope zone_scope(DONT_DELETE_ON_EXIT);
+ CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
HistogramTimerScope timer(&Counters::parse_lazy);
source->TryFlattenIfNotFlat();
Counters::total_parse_size.Increment(source->length());
@@ -2791,7 +2841,8 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
}
-Expression* Parser::ParseNewExpression(bool* ok) {
+
+Expression* Parser::ParseNewPrefix(PositionStack* stack, bool* ok) {
// NewExpression ::
// ('new')+ MemberExpression
@@ -2803,32 +2854,37 @@ Expression* Parser::ParseNewExpression(bool* ok) {
// many we have parsed. This information is then passed on to the
// member expression parser, which is only allowed to match argument
// lists as long as it has 'new' prefixes left.
- List<int> new_positions(4);
- while (peek() == Token::NEW) {
- Consume(Token::NEW);
- new_positions.Add(scanner().location().beg_pos);
+ Expect(Token::NEW, CHECK_OK);
+ PositionStack::Element pos(stack, scanner().location().beg_pos);
+
+ Expression* result;
+ if (peek() == Token::NEW) {
+ result = ParseNewPrefix(stack, CHECK_OK);
+ } else {
+ result = ParseMemberWithNewPrefixesExpression(stack, CHECK_OK);
}
- ASSERT(new_positions.length() > 0);
- Expression* result =
- ParseMemberWithNewPrefixesExpression(&new_positions, CHECK_OK);
- while (!new_positions.is_empty()) {
- int last = new_positions.RemoveLast();
+ if (!stack->is_empty()) {
+ int last = stack->pop();
result = NEW(CallNew(result, new ZoneList<Expression*>(0), last));
}
return result;
}
+Expression* Parser::ParseNewExpression(bool* ok) {
+ PositionStack stack(ok);
+ return ParseNewPrefix(&stack, ok);
+}
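
The rewrite turns the "count the news, then unwind" loop into direct recursion: each ParseNewPrefix activation consumes one 'new', an argument list met while parsing the member expression pops the nearest pending 'new', and any 'new' still unmatched gets an empty argument list on the way out. A deliberately toy sketch of that scheme over a flat token array (hypothetical and far simpler than the real parser, using a counter where the real code uses the PositionStack):

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

static std::vector<std::string> tokens;  // only "new", "F", "()"
static size_t pos = 0;

static std::string ParseNewPrefix(size_t* news_pending) {
  assert(tokens[pos] == "new");
  ++pos;
  ++*news_pending;
  std::string result;
  if (pos < tokens.size() && tokens[pos] == "new") {
    result = ParseNewPrefix(news_pending);
  } else {
    result = tokens[pos++];  // the member expression, e.g. "F"
    // Each explicit argument list consumes the nearest pending 'new'.
    while (pos < tokens.size() && tokens[pos] == "()" && *news_pending > 0) {
      result = "new(" + result + ")";
      --*news_pending;
      ++pos;
    }
  }
  if (*news_pending > 0) {  // unmatched 'new': empty argument list
    result = "new(" + result + ")";
    --*news_pending;
  }
  return result;
}

int main() {
  tokens = {"new", "new", "F", "()"};
  pos = 0;
  size_t pending = 0;
  // 'new new F()': the '()' binds to the innermost 'new'.
  assert(ParseNewPrefix(&pending) == "new(new(F))");
  return 0;
}
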
+
+
Expression* Parser::ParseMemberExpression(bool* ok) {
- static List<int> new_positions(0);
- return ParseMemberWithNewPrefixesExpression(&new_positions, ok);
+ return ParseMemberWithNewPrefixesExpression(NULL, ok);
}
-Expression* Parser::ParseMemberWithNewPrefixesExpression(
- List<int>* new_positions,
- bool* ok) {
+Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
+ bool* ok) {
// MemberExpression ::
// (PrimaryExpression | FunctionLiteral)
// ('[' Expression ']' | '.' Identifier | Arguments)*
@@ -2864,10 +2920,10 @@ Expression* Parser::ParseMemberWithNewPrefixesExpression(
break;
}
case Token::LPAREN: {
- if (new_positions->is_empty()) return result;
+ if ((stack == NULL) || stack->is_empty()) return result;
// Consume one of the new prefixes (already parsed).
ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
- int last = new_positions->RemoveLast();
+ int last = stack->pop();
result = NEW(CallNew(result, args, last));
break;
}
@@ -3547,8 +3603,8 @@ Handle<String> Parser::ParseIdentifierOrGetOrSet(bool* is_get,
bool Parser::TargetStackContainsLabel(Handle<String> label) {
- for (int i = target_stack_->length(); i-- > 0;) {
- BreakableStatement* stat = target_stack_->at(i)->AsBreakableStatement();
+ for (Target* t = target_stack_; t != NULL; t = t->previous()) {
+ BreakableStatement* stat = t->node()->AsBreakableStatement();
if (stat != NULL && ContainsLabel(stat->labels(), label))
return true;
}
@@ -3558,13 +3614,12 @@ bool Parser::TargetStackContainsLabel(Handle<String> label) {
BreakableStatement* Parser::LookupBreakTarget(Handle<String> label, bool* ok) {
bool anonymous = label.is_null();
- for (int i = target_stack_->length(); i-- > 0;) {
- BreakableStatement* stat = target_stack_->at(i)->AsBreakableStatement();
+ for (Target* t = target_stack_; t != NULL; t = t->previous()) {
+ BreakableStatement* stat = t->node()->AsBreakableStatement();
if (stat == NULL) continue;
-
if ((anonymous && stat->is_target_for_anonymous()) ||
(!anonymous && ContainsLabel(stat->labels(), label))) {
- RegisterTargetUse(stat->break_target(), i);
+ RegisterTargetUse(stat->break_target(), t->previous());
return stat;
}
}
@@ -3575,13 +3630,13 @@ BreakableStatement* Parser::LookupBreakTarget(Handle<String> label, bool* ok) {
IterationStatement* Parser::LookupContinueTarget(Handle<String> label,
bool* ok) {
bool anonymous = label.is_null();
- for (int i = target_stack_->length(); i-- > 0;) {
- IterationStatement* stat = target_stack_->at(i)->AsIterationStatement();
+ for (Target* t = target_stack_; t != NULL; t = t->previous()) {
+ IterationStatement* stat = t->node()->AsIterationStatement();
if (stat == NULL) continue;
ASSERT(stat->is_target_for_anonymous());
if (anonymous || ContainsLabel(stat->labels(), label)) {
- RegisterTargetUse(stat->continue_target(), i);
+ RegisterTargetUse(stat->continue_target(), t->previous());
return stat;
}
}
@@ -3589,12 +3644,12 @@ IterationStatement* Parser::LookupContinueTarget(Handle<String> label,
}
-void Parser::RegisterTargetUse(BreakTarget* target, int index) {
- // Register that a break target found at the given index in the
+void Parser::RegisterTargetUse(BreakTarget* target, Target* stop) {
+ // Register that a break target found at the given stop in the
// target stack has been used from the top of the target stack. Add
// the break target to any TargetCollectors passed on the stack.
- for (int i = target_stack_->length(); i-- > index;) {
- TargetCollector* collector = target_stack_->at(i)->AsTargetCollector();
+ for (Target* t = target_stack_; t != stop; t = t->previous()) {
+ TargetCollector* collector = t->node()->AsTargetCollector();
if (collector != NULL) collector->AddTarget(target);
}
}
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index 4c1401cf4..c029c4b25 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -31,7 +31,8 @@
#include "scanner.h"
#include "allocation.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
class ParserMessage : public Malloced {
diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc
index 82208f1a3..acef74cc5 100644
--- a/deps/v8/src/platform-freebsd.cc
+++ b/deps/v8/src/platform-freebsd.cc
@@ -55,7 +55,8 @@
#include "platform.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// 0 is never a valid thread id on FreeBSD since tids and pids share a
// name space and pid 0 is used to kill the group (see man 2 kill).
diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc
index c02eebc3b..79ffe8149 100644
--- a/deps/v8/src/platform-linux.cc
+++ b/deps/v8/src/platform-linux.cc
@@ -58,7 +58,8 @@
#include "platform.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// 0 is never a valid thread id on Linux since tids and pids share a
// name space and pid 0 is reserved (see man 2 kill).
@@ -87,8 +88,15 @@ double OS::nan_value() {
int OS::ActivationFrameAlignment() {
- // Floating point code runs faster if the stack is 8-byte aligned.
+#ifdef V8_TARGET_ARCH_ARM
+ // On EABI ARM targets this is required for fp correctness in the
+ // runtime system.
return 8;
+#else
+ // With gcc 4.4 the tree vectorization optimiser can generate code
+ // that requires 16-byte alignment, such as movdqa on x86.
+ return 16;
+#endif
}
@@ -609,9 +617,16 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
sample.sp = mcontext.gregs[REG_RSP];
sample.fp = mcontext.gregs[REG_RBP];
#elif V8_HOST_ARCH_ARM
+// An undefined macro evaluates to 0, so this applies to Android's Bionic also.
+#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
sample.pc = mcontext.gregs[R15];
sample.sp = mcontext.gregs[R13];
sample.fp = mcontext.gregs[R11];
+#else
+ sample.pc = mcontext.arm_pc;
+ sample.sp = mcontext.arm_sp;
+ sample.fp = mcontext.arm_fp;
+#endif
#endif
}
diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc
index 79515434e..3e0e2841b 100644
--- a/deps/v8/src/platform-macos.cc
+++ b/deps/v8/src/platform-macos.cc
@@ -58,7 +58,8 @@
#include "platform.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// 0 is never a valid thread id on MacOSX since a pthread_t is
// a pointer.
@@ -481,6 +482,13 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
mcontext_t& mcontext = ucontext->uc_mcontext;
+#if V8_HOST_ARCH_X64
+ UNIMPLEMENTED();
+ USE(mcontext);
+ sample.pc = 0;
+ sample.sp = 0;
+ sample.fp = 0;
+#elif V8_HOST_ARCH_IA32
#if __DARWIN_UNIX03
sample.pc = mcontext->__ss.__eip;
sample.sp = mcontext->__ss.__esp;
@@ -490,6 +498,9 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
sample.sp = mcontext->ss.esp;
sample.fp = mcontext->ss.ebp;
#endif // __DARWIN_UNIX03
+#else
+#error Unsupported Mac OS X host architecture.
+#endif // V8_HOST_ARCH_IA32
}
// We always sample the VM state.
diff --git a/deps/v8/src/platform-nullos.cc b/deps/v8/src/platform-nullos.cc
index 42583f17f..60ae76d6b 100644
--- a/deps/v8/src/platform-nullos.cc
+++ b/deps/v8/src/platform-nullos.cc
@@ -37,7 +37,8 @@
#include "platform.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Give V8 the opportunity to override the default ceil behaviour.
double ceiling(double x) {
diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc
index de16ef514..d628a5148 100644
--- a/deps/v8/src/platform-posix.cc
+++ b/deps/v8/src/platform-posix.cc
@@ -47,7 +47,8 @@
#include "platform.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// ----------------------------------------------------------------------------
diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc
index 6c4e67a7f..1b0f9b24d 100644
--- a/deps/v8/src/platform-win32.cc
+++ b/deps/v8/src/platform-win32.cc
@@ -58,6 +58,12 @@
#include <time.h> // For LocalOffset() implementation.
#include <mmsystem.h> // For timeGetTime().
+#ifdef __MINGW32__
+// Require Windows XP or higher when compiling with MinGW. This is needed for
+// the MinGW header files to expose getaddrinfo.
+#undef _WIN32_WINNT
+#define _WIN32_WINNT 0x501
+#endif // __MINGW32__
#ifndef __MINGW32__
#include <dbghelp.h> // For SymLoadModule64 et al.
#endif // __MINGW32__
@@ -210,7 +216,8 @@ int random() {
}
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
double ceiling(double x) {
return ceil(x);
diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h
index e23abfc37..4522c7403 100644
--- a/deps/v8/src/platform.h
+++ b/deps/v8/src/platform.h
@@ -60,7 +60,8 @@ enum {
#define INFINITY HUGE_VAL
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
int isfinite(double x);
} }
int isnan(double x);
@@ -105,7 +106,8 @@ int random();
#endif // __GNUC__
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
double ceiling(double x);
@@ -491,7 +493,7 @@ class Socket {
// TickSample captures the information collected for each sample.
class TickSample {
public:
- TickSample() : pc(0), sp(0), fp(0), state(OTHER) {}
+ TickSample() : pc(0), sp(0), fp(0), state(OTHER), frames_count(0) {}
uintptr_t pc; // Instruction pointer.
uintptr_t sp; // Stack pointer.
uintptr_t fp; // Frame pointer.
@@ -518,10 +520,11 @@ class Sampler {
// Is the sampler used for profiling.
inline bool IsProfiling() { return profiling_; }
- class PlatformData;
- protected:
+ // Whether the sampler is running (that is, consumes resources).
inline bool IsActive() { return active_; }
+ class PlatformData;
+
private:
int interval_;
bool profiling_;
diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc
index b58000a8f..79f1883ef 100644
--- a/deps/v8/src/prettyprinter.cc
+++ b/deps/v8/src/prettyprinter.cc
@@ -33,7 +33,8 @@
#include "scopes.h"
#include "platform.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
#ifdef DEBUG
@@ -692,10 +693,10 @@ void AstPrinter::PrintLabelsIndented(const char* info, ZoneStringList* labels) {
Print(" ");
}
PrintLabels(labels);
- Print("\n");
} else if (info != NULL) {
PrintIndented(info);
}
+ Print("\n");
}
@@ -917,9 +918,8 @@ void AstPrinter::VisitLiteral(Literal* node) {
void AstPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
IndentedScope indent("REGEXP LITERAL");
- PrintLiteral(node->pattern(), false);
- Print(",");
- PrintLiteral(node->flags(), false);
+ PrintLiteralIndented("PATTERN", node->pattern(), false);
+ PrintLiteralIndented("FLAGS", node->flags(), false);
}
diff --git a/deps/v8/src/prettyprinter.h b/deps/v8/src/prettyprinter.h
index 720fe7b4c..bfce9b033 100644
--- a/deps/v8/src/prettyprinter.h
+++ b/deps/v8/src/prettyprinter.h
@@ -30,7 +30,8 @@
#include "ast.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
#ifdef DEBUG
diff --git a/deps/v8/src/property.cc b/deps/v8/src/property.cc
index 6c2153008..2915c4abc 100644
--- a/deps/v8/src/property.cc
+++ b/deps/v8/src/property.cc
@@ -27,7 +27,8 @@
#include "v8.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
void DescriptorWriter::Write(Descriptor* desc) {
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index 60a9b544d..edab97ab0 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -28,7 +28,8 @@
#ifndef V8_PROPERTY_H_
#define V8_PROPERTY_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Abstraction for elements in instance-descriptor arrays.
diff --git a/deps/v8/src/regexp-macro-assembler-irregexp-inl.h b/deps/v8/src/regexp-macro-assembler-irregexp-inl.h
index fa4c3d16b..5074f210a 100644
--- a/deps/v8/src/regexp-macro-assembler-irregexp-inl.h
+++ b/deps/v8/src/regexp-macro-assembler-irregexp-inl.h
@@ -35,7 +35,8 @@
#ifndef V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
#define V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
void RegExpMacroAssemblerIrregexp::Emit(uint32_t byte,
diff --git a/deps/v8/src/regexp-macro-assembler-irregexp.cc b/deps/v8/src/regexp-macro-assembler-irregexp.cc
index 436db35d8..b87c51f90 100644
--- a/deps/v8/src/regexp-macro-assembler-irregexp.cc
+++ b/deps/v8/src/regexp-macro-assembler-irregexp.cc
@@ -33,7 +33,8 @@
#include "regexp-macro-assembler-irregexp-inl.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
RegExpMacroAssemblerIrregexp::RegExpMacroAssemblerIrregexp(Vector<byte> buffer)
diff --git a/deps/v8/src/regexp-macro-assembler-irregexp.h b/deps/v8/src/regexp-macro-assembler-irregexp.h
index 9ed82e396..597046c4c 100644
--- a/deps/v8/src/regexp-macro-assembler-irregexp.h
+++ b/deps/v8/src/regexp-macro-assembler-irregexp.h
@@ -28,7 +28,8 @@
#ifndef V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
#define V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
diff --git a/deps/v8/src/regexp-macro-assembler-tracer.cc b/deps/v8/src/regexp-macro-assembler-tracer.cc
index 74345d853..30eb485e3 100644
--- a/deps/v8/src/regexp-macro-assembler-tracer.cc
+++ b/deps/v8/src/regexp-macro-assembler-tracer.cc
@@ -30,7 +30,8 @@
#include "regexp-macro-assembler.h"
#include "regexp-macro-assembler-tracer.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
RegExpMacroAssemblerTracer::RegExpMacroAssemblerTracer(
RegExpMacroAssembler* assembler) :
diff --git a/deps/v8/src/regexp-macro-assembler-tracer.h b/deps/v8/src/regexp-macro-assembler-tracer.h
index f25289e6e..0fd73f3d6 100644
--- a/deps/v8/src/regexp-macro-assembler-tracer.h
+++ b/deps/v8/src/regexp-macro-assembler-tracer.h
@@ -28,7 +28,8 @@
#ifndef V8_REGEXP_MACRO_ASSEMBLER_TRACER_H_
#define V8_REGEXP_MACRO_ASSEMBLER_TRACER_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Decorator on a RegExpMacroAssembler that logs all calls.
class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
diff --git a/deps/v8/src/regexp-macro-assembler.cc b/deps/v8/src/regexp-macro-assembler.cc
index 913ac6462..8dede304e 100644
--- a/deps/v8/src/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp-macro-assembler.cc
@@ -31,7 +31,8 @@
#include "assembler.h"
#include "regexp-macro-assembler.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
RegExpMacroAssembler::RegExpMacroAssembler() {
}
diff --git a/deps/v8/src/regexp-macro-assembler.h b/deps/v8/src/regexp-macro-assembler.h
index a3f398d8a..484986428 100644
--- a/deps/v8/src/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp-macro-assembler.h
@@ -28,7 +28,8 @@
#ifndef V8_REGEXP_MACRO_ASSEMBLER_H_
#define V8_REGEXP_MACRO_ASSEMBLER_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
struct DisjunctDecisionRow {
RegExpCharacterClass cc;
diff --git a/deps/v8/src/regexp-stack.cc b/deps/v8/src/regexp-stack.cc
index 05daa58d4..83cb6e4ef 100644
--- a/deps/v8/src/regexp-stack.cc
+++ b/deps/v8/src/regexp-stack.cc
@@ -29,7 +29,8 @@
#include "top.h"
#include "regexp-stack.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
RegExpStack::RegExpStack() {
// Initialize, if not already initialized.
diff --git a/deps/v8/src/regexp-stack.h b/deps/v8/src/regexp-stack.h
index b955e76a0..6c090daa6 100644
--- a/deps/v8/src/regexp-stack.h
+++ b/deps/v8/src/regexp-stack.h
@@ -28,7 +28,8 @@
#ifndef V8_REGEXP_STACK_H_
#define V8_REGEXP_STACK_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Maintains a per-v8thread stack area that can be used by the irregexp
// implementation for its backtracking stack.
diff --git a/deps/v8/src/register-allocator-inl.h b/deps/v8/src/register-allocator-inl.h
index 9e745b52e..8fb498b7f 100644
--- a/deps/v8/src/register-allocator-inl.h
+++ b/deps/v8/src/register-allocator-inl.h
@@ -28,19 +28,44 @@
#ifndef V8_REGISTER_ALLOCATOR_INL_H_
#define V8_REGISTER_ALLOCATOR_INL_H_
+#include "codegen.h"
#include "register-allocator.h"
#include "virtual-frame.h"
-namespace v8 { namespace internal {
+#if V8_TARGET_ARCH_IA32
+#include "ia32/register-allocator-ia32-inl.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/register-allocator-x64-inl.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/register-allocator-arm-inl.h"
+#else
+#error Unsupported target architecture.
+#endif
+
+
+namespace v8 {
+namespace internal {
Result::~Result() {
- if (is_register()) cgen_->allocator()->Unuse(reg());
+ if (is_register()) {
+ CodeGeneratorScope::Current()->allocator()->Unuse(reg());
+ }
}
void Result::Unuse() {
- if (is_register()) cgen_->allocator()->Unuse(reg());
- type_ = INVALID;
+ if (is_register()) {
+ CodeGeneratorScope::Current()->allocator()->Unuse(reg());
+ }
+ invalidate();
+}
+
+
+void Result::CopyTo(Result* destination) const {
+ destination->value_ = value_;
+ if (is_register()) {
+ CodeGeneratorScope::Current()->allocator()->Use(reg());
+ }
}
diff --git a/deps/v8/src/register-allocator.cc b/deps/v8/src/register-allocator.cc
index 94e031fa0..2599232ce 100644
--- a/deps/v8/src/register-allocator.cc
+++ b/deps/v8/src/register-allocator.cc
@@ -30,54 +30,28 @@
#include "codegen-inl.h"
#include "register-allocator-inl.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// -------------------------------------------------------------------------
// Result implementation.
-Result::Result(Register reg, CodeGenerator* cgen)
- : static_type_(),
- type_(REGISTER),
- cgen_(cgen) {
- data_.reg_ = reg;
- ASSERT(reg.is_valid());
- cgen_->allocator()->Use(reg);
-}
-
-
-Result::Result(Register reg, CodeGenerator* cgen, StaticType static_type)
- : static_type_(static_type),
- type_(REGISTER),
- cgen_(cgen) {
- data_.reg_ = reg;
- ASSERT(reg.is_valid());
- cgen_->allocator()->Use(reg);
-}
-
-void Result::CopyTo(Result* destination) const {
- destination->static_type_ = static_type_;
- destination->type_ = type();
- destination->cgen_ = cgen_;
-
- if (is_register()) {
- destination->data_.reg_ = reg();
- cgen_->allocator()->Use(reg());
- } else if (is_constant()) {
- destination->data_.handle_ = data_.handle_;
- } else {
- ASSERT(!is_valid());
- }
+Result::Result(Register reg) {
+ ASSERT(reg.is_valid() && !RegisterAllocator::IsReserved(reg));
+ CodeGeneratorScope::Current()->allocator()->Use(reg);
+ value_ = StaticTypeField::encode(StaticType::UNKNOWN_TYPE)
+ | TypeField::encode(REGISTER)
+ | DataField::encode(reg.code_);
}
-// -------------------------------------------------------------------------
-// RegisterFile implementation.
-
-void RegisterFile::CopyTo(RegisterFile* other) {
- for (int i = 0; i < kNumRegisters; i++) {
- other->ref_counts_[i] = ref_counts_[i];
- }
+Result::Result(Register reg, StaticType type) {
+ ASSERT(reg.is_valid() && !RegisterAllocator::IsReserved(reg));
+ CodeGeneratorScope::Current()->allocator()->Use(reg);
+ value_ = StaticTypeField::encode(type.static_type_)
+ | TypeField::encode(REGISTER)
+ | DataField::encode(reg.code_);
}
@@ -87,12 +61,11 @@ void RegisterFile::CopyTo(RegisterFile* other) {
Result RegisterAllocator::AllocateWithoutSpilling() {
// Return the first free register, if any.
- int free_reg = registers_.ScanForFreeRegister();
- if (free_reg < kNumRegisters) {
- Register free_result = { free_reg };
- return Result(free_result, cgen_);
+ int num = registers_.ScanForFreeRegister();
+ if (num == RegisterAllocator::kInvalidRegister) {
+ return Result();
}
- return Result(cgen_);
+ return Result(RegisterAllocator::ToRegister(num));
}
@@ -104,7 +77,7 @@ Result RegisterAllocator::Allocate() {
Register free_reg = cgen_->frame()->SpillAnyRegister();
if (free_reg.is_valid()) {
ASSERT(!is_used(free_reg));
- return Result(free_reg, cgen_);
+ return Result(free_reg);
}
}
return result;
@@ -114,7 +87,7 @@ Result RegisterAllocator::Allocate() {
Result RegisterAllocator::Allocate(Register target) {
// If the target is not referenced, it can simply be allocated.
if (!is_used(target)) {
- return Result(target, cgen_);
+ return Result(target);
}
// If the target is only referenced in the frame, it can be spilled and
// then allocated.
@@ -122,10 +95,10 @@ Result RegisterAllocator::Allocate(Register target) {
if (cgen_->frame()->is_used(target) && count(target) == 1) {
cgen_->frame()->Spill(target);
ASSERT(!is_used(target));
- return Result(target, cgen_);
+ return Result(target);
}
// Otherwise (if it's referenced outside the frame) we cannot allocate it.
- return Result(cgen_);
+ return Result();
}
diff --git a/deps/v8/src/register-allocator.h b/deps/v8/src/register-allocator.h
index f79d6cfdf..c5391918d 100644
--- a/deps/v8/src/register-allocator.h
+++ b/deps/v8/src/register-allocator.h
@@ -30,7 +30,18 @@
#include "macro-assembler.h"
-namespace v8 { namespace internal {
+#if V8_TARGET_ARCH_IA32
+#include "ia32/register-allocator-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/register-allocator-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/register-allocator-arm.h"
+#else
+#error Unsupported target architecture.
+#endif
+
+namespace v8 {
+namespace internal {
// -------------------------------------------------------------------------
@@ -100,7 +111,10 @@ class StaticType BASE_EMBEDDED {
explicit StaticType(StaticTypeEnum static_type) : static_type_(static_type) {}
// StaticTypeEnum static_type_;
- byte static_type_;
+ StaticTypeEnum static_type_;
+
+ friend class FrameElement;
+ friend class Result;
};
@@ -119,26 +133,20 @@ class Result BASE_EMBEDDED {
};
// Construct an invalid result.
- explicit Result(CodeGenerator* cgen)
- : static_type_(),
- type_(INVALID),
- cgen_(cgen) {}
+ Result() { invalidate(); }
// Construct a register Result.
- Result(Register reg,
- CodeGenerator* cgen);
+ explicit Result(Register reg);
// Construct a register Result with a known static type.
- Result(Register reg,
- CodeGenerator* cgen,
- StaticType static_type);
+ Result(Register reg, StaticType static_type);
// Construct a Result whose value is a compile-time constant.
- Result(Handle<Object> value, CodeGenerator * cgen)
- : static_type_(StaticType::TypeOf(*value)),
- type_(CONSTANT),
- cgen_(cgen) {
- data_.handle_ = value.location();
+ explicit Result(Handle<Object> value) {
+ value_ = StaticTypeField::encode(StaticType::TypeOf(*value).static_type_)
+ | TypeField::encode(CONSTANT)
+ | DataField::encode(ConstantList()->length());
+ ConstantList()->Add(value);
}
// The copy constructor and assignment operators could each create a new
@@ -157,25 +165,51 @@ class Result BASE_EMBEDDED {
inline ~Result();
+ // Static indirection table for handles to constants. If a Result
+ // represents a constant, the data contains an index into this table
+ // of handles to the actual constants.
+ typedef ZoneList<Handle<Object> > ZoneObjectList;
+
+ static ZoneObjectList* ConstantList() {
+ static ZoneObjectList list(10);
+ return &list;
+ }
+
+ // Clear the constants indirection table.
+ static void ClearConstantList() {
+ ConstantList()->Clear();
+ }
+
inline void Unuse();
- StaticType static_type() const { return static_type_; }
- void set_static_type(StaticType static_type) { static_type_ = static_type; }
+ StaticType static_type() const {
+ return StaticType(StaticTypeField::decode(value_));
+ }
- Type type() const { return static_cast<Type>(type_); }
+ void set_static_type(StaticType type) {
+ value_ = value_ & ~StaticTypeField::mask();
+ value_ = value_ | StaticTypeField::encode(type.static_type_);
+ }
+
+ Type type() const { return TypeField::decode(value_); }
+
+ void invalidate() { value_ = TypeField::encode(INVALID); }
bool is_valid() const { return type() != INVALID; }
bool is_register() const { return type() == REGISTER; }
bool is_constant() const { return type() == CONSTANT; }
Register reg() const {
- ASSERT(type() == REGISTER);
- return data_.reg_;
+ ASSERT(is_register());
+ uint32_t reg = DataField::decode(value_);
+ Register result;
+ result.code_ = reg;
+ return result;
}
Handle<Object> handle() const {
ASSERT(type() == CONSTANT);
- return Handle<Object>(data_.handle_);
+ return ConstantList()->at(DataField::decode(value_));
}
// Move this result to an arbitrary register. The register is not
@@ -189,17 +223,15 @@ class Result BASE_EMBEDDED {
void ToRegister(Register reg);
private:
- StaticType static_type_;
- byte type_;
+ uint32_t value_;
- union {
- Register reg_;
- Object** handle_;
- } data_;
+ class StaticTypeField: public BitField<StaticType::StaticTypeEnum, 0, 3> {};
+ class TypeField: public BitField<Type, 3, 2> {};
+ class DataField: public BitField<uint32_t, 5, 32 - 6> {};
- CodeGenerator* cgen_;
+ inline void CopyTo(Result* destination) const;
- void CopyTo(Result* destination) const;
+ friend class CodeGeneratorScope;
};
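
Result shrinks from four members (static type, type tag, data union, CodeGenerator*) to a single uint32_t carved up by BitField templates. A self-contained sketch of the same encode/decode mechanics, with a minimal BitField reimplemented locally (V8's real one lives elsewhere in the source) and illustrative enum values:

#include <cassert>
#include <cstdint>

template <class T, int shift, int size>
class BitField {
 public:
  static uint32_t mask() { return ((1u << size) - 1) << shift; }
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t value) {
    return static_cast<T>((value & mask()) >> shift);
  }
};

enum StaticTypeEnum { UNKNOWN_TYPE, SMI_TYPE, HEAP_OBJECT_TYPE };
enum Type { INVALID, REGISTER, CONSTANT };

// Layout matching the declaration above: 3 + 2 + 26 bits.
typedef BitField<StaticTypeEnum, 0, 3> StaticTypeField;
typedef BitField<Type, 3, 2> TypeField;
typedef BitField<uint32_t, 5, 32 - 6> DataField;

int main() {
  // Encode a register Result: register number 4, known smi type. For
  // a CONSTANT the data field would instead hold an index into the
  // ConstantList() indirection table.
  uint32_t value = StaticTypeField::encode(SMI_TYPE)
                 | TypeField::encode(REGISTER)
                 | DataField::encode(4);
  assert(TypeField::decode(value) == REGISTER);
  assert(StaticTypeField::decode(value) == SMI_TYPE);
  assert(DataField::decode(value) == 4);
  return 0;
}
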
@@ -219,42 +251,50 @@ class RegisterFile BASE_EMBEDDED {
}
}
- // Predicates and accessors for the reference counts. The versions
- // that take a register code rather than a register are for
- // convenience in loops over the register codes.
- bool is_used(int reg_code) const { return ref_counts_[reg_code] > 0; }
- bool is_used(Register reg) const { return is_used(reg.code()); }
- int count(int reg_code) const { return ref_counts_[reg_code]; }
- int count(Register reg) const { return count(reg.code()); }
+ // Predicates and accessors for the reference counts.
+ bool is_used(int num) {
+ ASSERT(0 <= num && num < kNumRegisters);
+ return ref_counts_[num] > 0;
+ }
+
+ int count(int num) {
+ ASSERT(0 <= num && num < kNumRegisters);
+ return ref_counts_[num];
+ }
// Record a use of a register by incrementing its reference count.
- void Use(Register reg) {
- ref_counts_[reg.code()]++;
+ void Use(int num) {
+ ASSERT(0 <= num && num < kNumRegisters);
+ ref_counts_[num]++;
}
// Record that a register will no longer be used by decrementing its
// reference count.
- void Unuse(Register reg) {
- ASSERT(!reg.is(no_reg));
- ASSERT(is_used(reg.code()));
- ref_counts_[reg.code()]--;
+ void Unuse(int num) {
+ ASSERT(is_used(num));
+ ref_counts_[num]--;
}
// Copy the reference counts from this register file to the other.
- void CopyTo(RegisterFile* other);
+ void CopyTo(RegisterFile* other) {
+ for (int i = 0; i < kNumRegisters; i++) {
+ other->ref_counts_[i] = ref_counts_[i];
+ }
+ }
private:
+ static const int kNumRegisters = RegisterAllocatorConstants::kNumRegisters;
+
int ref_counts_[kNumRegisters];
- // Very fast inlined loop to find a free register.
- // Used in RegisterAllocator::AllocateWithoutSpilling.
- // Returns kNumRegisters if no free register found.
- inline int ScanForFreeRegister() {
- int i = 0;
- for (; i < kNumRegisters ; ++i) {
- if (ref_counts_[i] == 0) break;
+ // Very fast inlined loop to find a free register. Used in
+ // RegisterAllocator::AllocateWithoutSpilling. Returns
+ // kInvalidRegister if no free register is found.
+ int ScanForFreeRegister() {
+ for (int i = 0; i < RegisterAllocatorConstants::kNumRegisters; i++) {
+ if (!is_used(i)) return i;
}
- return i;
+ return RegisterAllocatorConstants::kInvalidRegister;
}
friend class RegisterAllocator;
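
The scan now reports failure with a sentinel rather than the register count. A hedged mini-version of the reference-count bookkeeping (constants and struct reduced to the bare minimum):

#include <cassert>

const int kNumRegisters = 4;           // stand-in for the arch constant
const int kInvalidRegister = -1;

struct RegisterFile {
  int ref_counts[kNumRegisters];
  bool is_used(int num) { return ref_counts[num] > 0; }
  int ScanForFreeRegister() {
    for (int i = 0; i < kNumRegisters; i++)
      if (!is_used(i)) return i;
    return kInvalidRegister;           // was: return kNumRegisters
  }
};

int main() {
  RegisterFile file = {{1, 2, 1, 1}};  // every register referenced
  assert(file.ScanForFreeRegister() == kInvalidRegister);
  file.ref_counts[2] = 0;              // release one reference
  assert(file.ScanForFreeRegister() == 2);
  return 0;
}
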
@@ -267,55 +307,62 @@ class RegisterFile BASE_EMBEDDED {
class RegisterAllocator BASE_EMBEDDED {
public:
- explicit RegisterAllocator(CodeGenerator* cgen) : cgen_(cgen) {}
-
- // A register file with each of the reserved registers counted once.
- static RegisterFile Reserved();
+ static const int kNumRegisters =
+ RegisterAllocatorConstants::kNumRegisters;
+ static const int kInvalidRegister =
+ RegisterAllocatorConstants::kInvalidRegister;
- // Unuse all the reserved registers in a register file.
- static void UnuseReserved(RegisterFile* register_file);
+ explicit RegisterAllocator(CodeGenerator* cgen) : cgen_(cgen) {}
// True if the register is reserved by the code generator, false if it
- // can be freely used by the allocator.
- static bool IsReserved(int reg_code);
- static bool IsReserved(Register reg) { return IsReserved(reg); }
+ // can be freely used by the allocator. Defined in the
+ // platform-specific XXX-inl.h files.
+ static inline bool IsReserved(Register reg);
+
+ // Convert between (unreserved) assembler registers and allocator
+ // numbers. Defined in the platform-specific XXX-inl.h files.
+ static inline int ToNumber(Register reg);
+ static inline Register ToRegister(int num);
// Predicates and accessors for the registers' reference counts.
- bool is_used(int reg_code) const { return registers_.is_used(reg_code); }
- bool is_used(Register reg) const { return registers_.is_used(reg.code()); }
- int count(int reg_code) const { return registers_.count(reg_code); }
- int count(Register reg) const { return registers_.count(reg.code()); }
+ bool is_used(int num) { return registers_.is_used(num); }
+ bool is_used(Register reg) { return registers_.is_used(ToNumber(reg)); }
+
+ int count(int num) { return registers_.count(num); }
+ int count(Register reg) { return registers_.count(ToNumber(reg)); }
// Explicitly record a reference to a register.
- void Use(Register reg) { registers_.Use(reg); }
+ void Use(int num) { registers_.Use(num); }
+ void Use(Register reg) { registers_.Use(ToNumber(reg)); }
// Explicitly record that a register will no longer be used.
- void Unuse(Register reg) { registers_.Unuse(reg); }
-
- // Initialize the register allocator for entry to a JS function. On
- // entry, the registers used by the JS calling convention are
- // externally referenced (ie, outside the virtual frame); and the
- // other registers are free.
- void Initialize();
+ void Unuse(int num) { registers_.Unuse(num); }
+ void Unuse(Register reg) { registers_.Unuse(ToNumber(reg)); }
// Reset the register reference counts to free all non-reserved registers.
- // A frame-external reference is kept to each of the reserved registers.
- void Reset();
+ void Reset() { registers_.Reset(); }
+
+ // Initialize the register allocator for entry to a JS function. On
+ // entry, the (non-reserved) registers used by the JS calling
+ // convention are referenced and the other (non-reserved) registers
+ // are free.
+ inline void Initialize();
// Allocate a free register and return a register result if possible or
// fail and return an invalid result.
Result Allocate();
- // Allocate a specific register if possible, spilling it from the frame if
- // necessary, or else fail and return an invalid result.
+ // Allocate a specific register if possible, spilling it from the
+ // current frame if necessary, or else fail and return an invalid
+ // result.
Result Allocate(Register target);
- // Allocate a free register without spilling any from the current frame or
- // fail and return an invalid result.
+ // Allocate a free register without spilling any from the current
+ // frame or fail and return an invalid result.
Result AllocateWithoutSpilling();
- // Allocate a free byte register without spilling any from the
- // current frame or fail and return an invalid result.
+ // Allocate a free byte register without spilling any from the current
+ // frame or fail and return an invalid result.
Result AllocateByteRegisterWithoutSpilling();
// Copy the internal state to a register file, to be restored later by
@@ -324,6 +371,7 @@ class RegisterAllocator BASE_EMBEDDED {
registers_.CopyTo(register_file);
}
+ // Restore the internal state.
void RestoreFrom(RegisterFile* register_file) {
register_file->CopyTo(&registers_);
}
diff --git a/deps/v8/src/rewriter.cc b/deps/v8/src/rewriter.cc
index 4e3676b7d..e0a0226ec 100644
--- a/deps/v8/src/rewriter.cc
+++ b/deps/v8/src/rewriter.cc
@@ -32,7 +32,8 @@
#include "scopes.h"
#include "rewriter.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
class AstOptimizer: public AstVisitor {
@@ -803,6 +804,7 @@ void Processor::VisitThisFunction(ThisFunction* node) {
bool Rewriter::Process(FunctionLiteral* function) {
+ HistogramTimerScope timer(&Counters::rewriting);
Scope* scope = function->scope();
if (scope->is_function_scope()) return true;
@@ -823,6 +825,7 @@ bool Rewriter::Optimize(FunctionLiteral* function) {
ZoneList<Statement*>* body = function->body();
if (FLAG_optimize_ast && !body->is_empty()) {
+ HistogramTimerScope timer(&Counters::ast_optimization);
AstOptimizer optimizer(function->name());
optimizer.Optimize(body);
if (optimizer.HasStackOverflow()) {
diff --git a/deps/v8/src/rewriter.h b/deps/v8/src/rewriter.h
index aa2f981de..8943e75aa 100644
--- a/deps/v8/src/rewriter.h
+++ b/deps/v8/src/rewriter.h
@@ -28,7 +28,8 @@
#ifndef V8_REWRITER_H_
#define V8_REWRITER_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Currently, the rewriter takes function literals (only top-level)
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index bf6286fe3..78be51292 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -46,7 +46,8 @@
#include "smart-pointer.h"
#include "parser.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
#define RUNTIME_ASSERT(value) do { \
@@ -1420,6 +1421,7 @@ class ReplacementStringBuilder {
void AddElement(Object* element) {
ASSERT(element->IsSmi() || element->IsString());
+ ASSERT(parts_->length() > part_count_);
parts_->set(part_count_, element);
part_count_++;
}
@@ -1589,6 +1591,7 @@ class CompiledReplacement {
if (i > last) {
parts->Add(ReplacementPart::ReplacementSubString(last, i));
}
+ ASSERT(capture_ref <= capture_count);
parts->Add(ReplacementPart::SubjectCapture(capture_ref));
last = next_index + 1;
}
@@ -1723,7 +1726,7 @@ static Object* StringReplaceRegExpWithString(String* subject,
int capture_count = regexp_handle->CaptureCount();
// CompiledReplacement uses zone allocation.
- ZoneScope zone(DELETE_ON_EXIT);
+ CompilationZoneScope zone(DELETE_ON_EXIT);
CompiledReplacement compiled_replacement;
compiled_replacement.Compile(replacement_handle,
capture_count,
@@ -2035,7 +2038,7 @@ static int BoyerMooreIndexOf(Vector<const schar> subject,
BoyerMoorePopulateGoodSuffixTable(pattern, start);
pchar last_char = pattern[m - 1];
// Continue search from i.
- do {
+ while (idx <= n - m) {
int j = m - 1;
schar c;
while (last_char != (c = subject[idx + j])) {
@@ -2061,7 +2064,7 @@ static int BoyerMooreIndexOf(Vector<const schar> subject,
}
idx += shift;
}
- } while (idx <= n - m);
+ }
return -1;
}
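
Switching the do-while to a while guards the first iteration: the old form only checked idx <= n - m after one pass, so a pattern longer than the remaining subject would read the body once with out-of-range indices. A tiny illustration of the control-flow difference:

#include <cassert>

int main() {
  // Condition false on entry: pattern (m) longer than subject (n).
  int n = 3, m = 5, idx = 0;
  int do_while_iterations = 0, while_iterations = 0;

  do { do_while_iterations++; } while (idx <= n - m);
  while (idx <= n - m) { while_iterations++; }

  assert(do_while_iterations == 1);  // body ran despite false condition
  assert(while_iterations == 0);     // guarded form never enters
  return 0;
}
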
@@ -2376,7 +2379,7 @@ static Object* Runtime_StringMatch(Arguments args) {
}
int length = subject->length();
- ZoneScope zone_space(DELETE_ON_EXIT);
+ CompilationZoneScope zone_space(DELETE_ON_EXIT);
ZoneList<int> offsets(8);
do {
int start;
@@ -2777,6 +2780,42 @@ Object* Runtime::ForceSetObjectProperty(Handle<JSObject> js_object,
}
+Object* Runtime::ForceDeleteObjectProperty(Handle<JSObject> js_object,
+ Handle<Object> key) {
+ HandleScope scope;
+
+ // Check if the given key is an array index.
+ uint32_t index;
+ if (Array::IndexFromObject(*key, &index)) {
+ // In Firefox/SpiderMonkey, Safari and Opera you can access the
+ // characters of a string using [] notation. In the case of a
+ // String object we just need to redirect the deletion to the
+ // underlying string if the index is in range. Since the
+ // underlying string does nothing with the deletion, we can ignore
+ // such deletions.
+ if (js_object->IsStringObjectWithCharacterAt(index)) {
+ return Heap::true_value();
+ }
+
+ return js_object->DeleteElement(index, JSObject::FORCE_DELETION);
+ }
+
+ Handle<String> key_string;
+ if (key->IsString()) {
+ key_string = Handle<String>::cast(key);
+ } else {
+ // Call-back into JavaScript to convert the key to a string.
+ bool has_pending_exception = false;
+ Handle<Object> converted = Execution::ToString(key, &has_pending_exception);
+ if (has_pending_exception) return Failure::Exception();
+ key_string = Handle<String>::cast(converted);
+ }
+
+ key_string->TryFlattenIfNotFlat();
+ return js_object->DeleteProperty(*key_string, JSObject::FORCE_DELETION);
+}
+
+
static Object* Runtime_SetProperty(Arguments args) {
NoHandleAllocation ha;
RUNTIME_ASSERT(args.length() == 3 || args.length() == 4);
@@ -2828,7 +2867,7 @@ static Object* Runtime_DeleteProperty(Arguments args) {
CONVERT_CHECKED(JSObject, object, args[0]);
CONVERT_CHECKED(String, key, args[1]);
- return object->DeleteProperty(key);
+ return object->DeleteProperty(key, JSObject::NORMAL_DELETION);
}
@@ -4316,9 +4355,15 @@ static Object* Runtime_LazyCompile(Arguments args) {
}
#endif
- // Compile the target function.
+ // Compile the target function. Here we compile using CompileLazyInLoop in
+ // order to get the optimized version. This helps code like delta-blue
+ // that calls performance-critical routines through constructors. A
+ // constructor call doesn't use a CallIC, it uses a LoadIC followed by a
+ // direct call. Since the in-loop tracking takes place through CallICs
+ // this means that things called through constructors are never known to
+ // be in loops. We compile them as if they are in loops here just in case.
ASSERT(!function->is_compiled());
- if (!CompileLazy(function, KEEP_EXCEPTION)) {
+ if (!CompileLazyInLoop(function, KEEP_EXCEPTION)) {
return Failure::Exception();
}
@@ -4356,6 +4401,14 @@ static Object* Runtime_GetFunctionDelegate(Arguments args) {
}
+static Object* Runtime_GetConstructorDelegate(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+ RUNTIME_ASSERT(!args[0]->IsJSFunction());
+ return *Execution::GetConstructorDelegate(args.at<Object>(0));
+}
+
+
static Object* Runtime_NewContext(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
@@ -4442,10 +4495,16 @@ static Object* Runtime_LookupContext(Arguments args) {
// compiler to do the right thing.
//
// TODO(1236026): This is a non-portable hack that should be removed.
+// TODO(x64): Definitely!
typedef uint64_t ObjectPair;
static inline ObjectPair MakePair(Object* x, Object* y) {
+#if V8_HOST_ARCH_64_BIT
+ UNIMPLEMENTED();
+ return 0;
+#else
return reinterpret_cast<uint32_t>(x) |
(reinterpret_cast<ObjectPair>(y) << 32);
+#endif
}
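
MakePair smuggles two 32-bit pointers through a single uint64_t return value (two registers, or one register pair, in common 32-bit ABIs). A sketch of the packing arithmetic with plain integers, which also shows why it cannot work once pointers themselves are 64 bits wide:

#include <cassert>
#include <cstdint>

typedef uint64_t ObjectPair;

// Pack two 32-bit values into one 64-bit result, low word first.
static ObjectPair MakePair(uint32_t x, uint32_t y) {
  return static_cast<ObjectPair>(x) |
         (static_cast<ObjectPair>(y) << 32);
}

int main() {
  ObjectPair pair = MakePair(0xDEADBEEFu, 0xCAFEBABEu);
  assert(static_cast<uint32_t>(pair) == 0xDEADBEEFu);        // x: low half
  assert(static_cast<uint32_t>(pair >> 32) == 0xCAFEBABEu);  // y: high half
  // With 64-bit pointers both halves no longer fit, hence the
  // UNIMPLEMENTED() guard under V8_HOST_ARCH_64_BIT above.
  return 0;
}
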
@@ -4882,16 +4941,14 @@ static Object* Runtime_GlobalReceiver(Arguments args) {
static Object* Runtime_CompileString(Arguments args) {
HandleScope scope;
- ASSERT_EQ(3, args.length());
+ ASSERT_EQ(2, args.length());
CONVERT_ARG_CHECKED(String, source, 0);
- CONVERT_ARG_CHECKED(Smi, line_offset, 1);
- CONVERT_ARG_CHECKED(Oddball, is_json, 2)
+ CONVERT_ARG_CHECKED(Oddball, is_json, 1)
// Compile source string in the global context.
Handle<Context> context(Top::context()->global_context());
Handle<JSFunction> boilerplate = Compiler::CompileEval(source,
context,
- line_offset->value(),
true,
is_json->IsTrue());
if (boilerplate.is_null()) return Failure::Exception();
@@ -4918,7 +4975,7 @@ static Object* CompileDirectEval(Handle<String> source) {
// Compile source string in the current context.
Handle<JSFunction> boilerplate =
- Compiler::CompileEval(source, context, 0, is_global, false);
+ Compiler::CompileEval(source, context, is_global, false);
if (boilerplate.is_null()) return Failure::Exception();
Handle<JSFunction> fun =
Factory::NewFunctionFromBoilerplate(boilerplate, context);
@@ -5422,7 +5479,7 @@ static Object* Runtime_DebugBreak(Arguments args) {
// Helper functions for wrapping and unwrapping stack frame ids.
static Smi* WrapFrameId(StackFrame::Id id) {
- ASSERT(IsAligned(OffsetFrom(id), 4));
+ ASSERT(IsAligned(OffsetFrom(id), static_cast<intptr_t>(4)));
return Smi::FromInt(id >> 2);
}
@@ -5471,7 +5528,8 @@ static int LocalPrototypeChainLength(JSObject* obj) {
}
-static Object* DebugLookupResultValue(Object* receiver, LookupResult* result,
+static Object* DebugLookupResultValue(Object* receiver, String* name,
+ LookupResult* result,
bool* caught_exception) {
Object* value;
switch (result->type()) {
@@ -5496,11 +5554,9 @@ static Object* DebugLookupResultValue(Object* receiver, LookupResult* result,
return result->GetConstantFunction();
case CALLBACKS: {
Object* structure = result->GetCallbackObject();
- if (structure->IsProxy()) {
- AccessorDescriptor* callback =
- reinterpret_cast<AccessorDescriptor*>(
- Proxy::cast(structure)->proxy());
- value = (callback->getter)(receiver, callback->data);
+ if (structure->IsProxy() || structure->IsAccessorInfo()) {
+ value = receiver->GetPropertyWithCallback(
+ receiver, structure, name, result->holder());
if (value->IsException()) {
value = Top::pending_exception();
Top::clear_pending_exception();
@@ -5546,6 +5602,17 @@ static Object* Runtime_DebugGetPropertyDetails(Arguments args) {
CONVERT_ARG_CHECKED(JSObject, obj, 0);
CONVERT_ARG_CHECKED(String, name, 1);
+ // Make sure to set the current context to the context before the debugger
+ // was entered (if the debugger is entered). We switch contexts here because
+ // some property lookups (accessors and interceptors) can call back into the
+ // embedding application, and the embedding application may assume that its
+ // own global context is the current context rather than some internal
+ // debugger context.
+ SaveContext save;
+ if (Debug::InDebugger()) {
+ Top::set_context(*Debug::debugger_entry()->GetContext());
+ }
+
// Skip the global proxy as it has no properties and always delegates to the
// real global object.
if (obj->IsJSGlobalProxy()) {
@@ -5580,15 +5647,29 @@ static Object* Runtime_DebugGetPropertyDetails(Arguments args) {
}
if (result.IsProperty()) {
+ // LookupResult is not GC safe, as all its members are raw object pointers.
+ // Calling DebugLookupResultValue can cause a GC, since it may invoke
+ // callbacks. The callback object in the LookupResult may still be needed
+ // after that call, so put it into a handle now for later use.
+ PropertyType result_type = result.type();
+ Handle<Object> result_callback_obj;
+ if (result_type == CALLBACKS) {
+ result_callback_obj = Handle<Object>(result.GetCallbackObject());
+ }
+
+ // Find the actual value. Don't use result after this call, as its contents
+ // can be invalid.
bool caught_exception = false;
- Object* value = DebugLookupResultValue(*obj, &result,
+ Object* value = DebugLookupResultValue(*obj, *name, &result,
&caught_exception);
if (value->IsFailure()) return value;
Handle<Object> value_handle(value);
+
// If the callback object is a fixed array, then it contains a JavaScript
// getter and/or setter.
- bool hasJavaScriptAccessors = result.type() == CALLBACKS &&
- result.GetCallbackObject()->IsFixedArray();
+ bool hasJavaScriptAccessors = result_type == CALLBACKS &&
+ result_callback_obj->IsFixedArray();
Handle<FixedArray> details =
Factory::NewFixedArray(hasJavaScriptAccessors ? 5 : 2);
details->set(0, *value_handle);
@@ -5617,7 +5698,7 @@ static Object* Runtime_DebugGetProperty(Arguments args) {
LookupResult result;
obj->Lookup(*name, &result);
if (result.IsProperty()) {
- return DebugLookupResultValue(*obj, &result, NULL);
+ return DebugLookupResultValue(*obj, *name, &result, NULL);
}
return Heap::undefined_value();
}
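
The LookupResult comments above describe a recurring V8 pattern: raw object
pointers are invalidated by any call that can trigger a moving GC, so
whatever is needed afterwards must be copied into a handle first. A toy
model of why the handle survives while the raw pointer dangles, under the
simplifying assumption that a "GC" just moves every object (none of this is
V8's actual API):

#include <cstdio>
#include <string>
#include <vector>

struct Heap {
  std::vector<std::string*> roots;   // slots the collector knows about
  void MoveEverything() {            // stands in for a moving GC
    for (std::string*& slot : roots) {
      std::string* moved = new std::string(*slot);
      delete slot;
      slot = moved;                  // roots are updated to the new location
    }
  }
};

int main() {
  Heap heap;
  heap.roots.push_back(new std::string("callback-object"));
  std::string** handle = &heap.roots[0];  // "handle": the GC updates it
  std::string* raw = *handle;             // raw pointer, like a LookupResult

  heap.MoveEverything();                  // like a GC during a callback

  // 'raw' now dangles; '*handle' still points at the live object.
  std::printf("%s\n", (*handle)->c_str());
  (void)raw;                              // never dereference it again
  delete *handle;
  return 0;
}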
@@ -6031,6 +6112,11 @@ static Object* Runtime_GetCFrames(Arguments args) {
Object* result = Runtime_CheckExecutionState(args);
if (result->IsFailure()) return result;
+#if V8_HOST_ARCH_64_BIT
+ UNIMPLEMENTED();
+ return Heap::undefined_value();
+#else
+
static const int kMaxCFramesSize = 200;
ScopedVector<OS::StackFrame> frames(kMaxCFramesSize);
int frames_count = OS::StackWalk(frames);
@@ -6062,6 +6148,7 @@ static Object* Runtime_GetCFrames(Arguments args) {
frames_array->set(i, *frame_value);
}
return *Factory::NewJSArrayWithElements(frames_array);
+#endif // V8_HOST_ARCH_64_BIT
}
@@ -6545,7 +6632,6 @@ static Object* Runtime_DebugEvaluate(Arguments args) {
Handle<JSFunction> boilerplate =
Compiler::CompileEval(function_source,
context,
- 0,
context->IsGlobalContext(),
false);
if (boilerplate.is_null()) return Failure::Exception();
@@ -6607,7 +6693,6 @@ static Object* Runtime_DebugEvaluateGlobal(Arguments args) {
Handle<JSFunction> boilerplate =
Handle<JSFunction>(Compiler::CompileEval(source,
context,
- 0,
true,
false));
if (boilerplate.is_null()) return Failure::Exception();
@@ -6626,67 +6711,15 @@ static Object* Runtime_DebugEvaluateGlobal(Arguments args) {
}
-// If an object given is an external string, check that the underlying
-// resource is accessible. For other kinds of objects, always return true.
-static bool IsExternalStringValid(Object* str) {
- if (!str->IsString() || !StringShape(String::cast(str)).IsExternal()) {
- return true;
- }
- if (String::cast(str)->IsAsciiRepresentation()) {
- return ExternalAsciiString::cast(str)->resource() != NULL;
- } else if (String::cast(str)->IsTwoByteRepresentation()) {
- return ExternalTwoByteString::cast(str)->resource() != NULL;
- } else {
- return true;
- }
-}
-
-
-// Helper function used by Runtime_DebugGetLoadedScripts below.
-static int DebugGetLoadedScripts(FixedArray* instances, int instances_size) {
- NoHandleAllocation ha;
- AssertNoAllocation no_alloc;
-
- // Scan heap for Script objects.
- int count = 0;
- HeapIterator iterator;
- while (iterator.has_next()) {
- HeapObject* obj = iterator.next();
- ASSERT(obj != NULL);
- if (obj->IsScript() && IsExternalStringValid(Script::cast(obj)->source())) {
- if (instances != NULL && count < instances_size) {
- instances->set(count, obj);
- }
- count++;
- }
- }
-
- return count;
-}
-
-
static Object* Runtime_DebugGetLoadedScripts(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 0);
- // Perform two GCs to get rid of all unreferenced scripts. The first GC gets
- // rid of all the cached script wrappers and the second gets rid of the
- // scripts which is no longer referenced.
- Heap::CollectAllGarbage();
- Heap::CollectAllGarbage();
-
- // Get the number of scripts.
- int count;
- count = DebugGetLoadedScripts(NULL, 0);
-
- // Allocate an array to hold the result.
- Handle<FixedArray> instances = Factory::NewFixedArray(count);
-
// Fill the script objects.
- count = DebugGetLoadedScripts(*instances, count);
+ Handle<FixedArray> instances = Debug::GetLoadedScripts();
// Convert the script objects to proper JS objects.
- for (int i = 0; i < count; i++) {
+ for (int i = 0; i < instances->length(); i++) {
Handle<Script> script = Handle<Script>(Script::cast(instances->get(i)));
// Get the script wrapper in a local handle before calling GetScriptWrapper,
// because using
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index 204129515..30bb7c5ae 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -28,7 +28,8 @@
#ifndef V8_RUNTIME_H_
#define V8_RUNTIME_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// The interface to C++ runtime functions.
@@ -37,7 +38,10 @@ namespace v8 { namespace internal {
// release and debug mode.
// This macro should only be used by the macro RUNTIME_FUNCTION_LIST.
-#define RUNTIME_FUNCTION_LIST_ALWAYS(F) \
+// WARNING: RUNTIME_FUNCTION_LIST_ALWAYS_* is a very large macro that caused
+// MSVC Intellisense to crash. It was broken into two macros to work around
+// this problem. Please avoid large recursive macros whenever possible.
+#define RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \
/* Property access */ \
F(GetProperty, 2) \
F(KeyedGetProperty, 2) \
@@ -60,6 +64,7 @@ namespace v8 { namespace internal {
/* Utilities */ \
F(GetCalledFunction, 0) \
F(GetFunctionDelegate, 1) \
+ F(GetConstructorDelegate, 1) \
F(NewArguments, 1) \
F(NewArgumentsFast, 3) \
F(LazyCompile, 1) \
@@ -153,8 +158,9 @@ namespace v8 { namespace internal {
F(NumberToRadixString, 2) \
F(NumberToFixed, 2) \
F(NumberToExponential, 2) \
- F(NumberToPrecision, 2) \
- \
+ F(NumberToPrecision, 2)
+
+#define RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \
/* Reflection */ \
F(FunctionSetInstanceClassName, 2) \
F(FunctionSetLength, 2) \
@@ -195,7 +201,7 @@ namespace v8 { namespace internal {
F(NumberIsFinite, 1) \
\
/* Globals */ \
- F(CompileString, 3) \
+ F(CompileString, 2) \
F(GlobalPrint, 1) \
\
/* Eval */ \
@@ -320,7 +326,8 @@ namespace v8 { namespace internal {
// via a native call by name (from within JS code).
#define RUNTIME_FUNCTION_LIST(F) \
- RUNTIME_FUNCTION_LIST_ALWAYS(F) \
+ RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \
+ RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \
RUNTIME_FUNCTION_LIST_DEBUG(F) \
RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F)
@@ -380,6 +387,9 @@ class Runtime : public AllStatic {
Handle<Object> value,
PropertyAttributes attr);
+ static Object* ForceDeleteObjectProperty(Handle<JSObject> object,
+ Handle<Object> key);
+
static Object* GetObjectProperty(Handle<Object> object, Handle<Object> key);
// This function is used in FunctionNameUsing* tests.
diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js
index 63e92921a..c8ccf9f84 100644
--- a/deps/v8/src/runtime.js
+++ b/deps/v8/src/runtime.js
@@ -327,6 +327,18 @@ function CALL_NON_FUNCTION() {
}
+function CALL_NON_FUNCTION_AS_CONSTRUCTOR() {
+ var callee = %GetCalledFunction();
+ var delegate = %GetConstructorDelegate(callee);
+ if (!IS_FUNCTION(delegate)) {
+ throw %MakeTypeError('called_non_callable', [typeof callee]);
+ }
+
+ var parameters = %NewArguments(delegate);
+ return delegate.apply(callee, parameters);
+}
+
+
function APPLY_PREPARE(args) {
var length;
// First check whether length is a positive Smi and args is an array. This is the
diff --git a/deps/v8/src/scanner.cc b/deps/v8/src/scanner.cc
index 65ec0f8b7..24a6d4be9 100644
--- a/deps/v8/src/scanner.cc
+++ b/deps/v8/src/scanner.cc
@@ -30,7 +30,8 @@
#include "ast.h"
#include "scanner.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// ----------------------------------------------------------------------------
// Character predicates
@@ -48,8 +49,12 @@ StaticResource<Scanner::Utf8Decoder> Scanner::utf8_decoder_;
// ----------------------------------------------------------------------------
// UTF8Buffer
-UTF8Buffer::UTF8Buffer() : data_(NULL) {
- Initialize(NULL, 0);
+UTF8Buffer::UTF8Buffer() {
+ static const int kInitialCapacity = 1 * KB;
+ data_ = NewArray<char>(kInitialCapacity);
+ limit_ = ComputeLimit(data_, kInitialCapacity);
+ Reset();
+ ASSERT(Capacity() == kInitialCapacity && pos() == 0);
}
@@ -58,33 +63,27 @@ UTF8Buffer::~UTF8Buffer() {
}
-void UTF8Buffer::Initialize(char* src, int length) {
- DeleteArray(data_);
- data_ = src;
- size_ = length;
- Reset();
-}
-
-
-void UTF8Buffer::AddChar(uc32 c) {
- const int min_size = 1024;
- if (pos_ + static_cast<int>(unibrow::Utf8::kMaxEncodedSize) > size_) {
- int new_size = size_ * 2;
- if (new_size < min_size) {
- new_size = min_size;
- }
- char* new_data = NewArray<char>(new_size);
- memcpy(new_data, data_, pos_);
+void UTF8Buffer::AddCharSlow(uc32 c) {
+ static const int kCapacityGrowthLimit = 1 * MB;
+ if (cursor_ > limit_) {
+ int old_capacity = Capacity();
+ int old_position = pos();
+ int new_capacity =
+ Min(old_capacity * 2, old_capacity + kCapacityGrowthLimit);
+ char* new_data = NewArray<char>(new_capacity);
+ memcpy(new_data, data_, old_position);
DeleteArray(data_);
data_ = new_data;
- size_ = new_size;
+ cursor_ = new_data + old_position;
+ limit_ = ComputeLimit(new_data, new_capacity);
+ ASSERT(Capacity() == new_capacity && pos() == old_position);
}
- if (static_cast<unsigned>(c) < unibrow::Utf8::kMaxOneByteChar) {
- data_[pos_++] = c; // common case: 7bit ASCII
+ if (static_cast<unsigned>(c) <= unibrow::Utf8::kMaxOneByteChar) {
+ *cursor_++ = c; // Common case: 7-bit ASCII.
} else {
- pos_ += unibrow::Utf8::Encode(&data_[pos_], c);
+ cursor_ += unibrow::Utf8::Encode(cursor_, c);
}
- ASSERT(pos_ <= size_);
+ ASSERT(pos() <= Capacity());
}
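
The rewritten buffer trades pos_/size_ bookkeeping for a cursor/limit pair:
the common one-byte append becomes a single compare and store (the inline
AddChar in scanner.h further down), and each growth step doubles capacity
but is capped by kCapacityGrowthLimit so very large scripts don't
over-allocate. A self-contained sketch of the same fast-path/slow-path
split, with illustrative constants:

#include <algorithm>
#include <cassert>
#include <cstring>

class ByteBuffer {
 public:
  ByteBuffer()
      : data_(new char[kInitialCapacity]),
        cursor_(data_),
        limit_(data_ + kInitialCapacity - kMaxEncodedSize) {}
  ~ByteBuffer() { delete[] data_; }

  // Fast path: one compare, one store.
  void Add(char c) {
    if (cursor_ <= limit_) { *cursor_++ = c; return; }
    AddSlow(c);
  }

  int pos() const { return static_cast<int>(cursor_ - data_); }

 private:
  static const int kInitialCapacity = 1024;
  static const int kMaxEncodedSize = 4;     // worst-case UTF-8 sequence
  static const int kGrowthLimit = 1 << 20;  // cap on each growth step

  void AddSlow(char c) {
    int old_capacity = static_cast<int>(limit_ - data_) + kMaxEncodedSize;
    int old_pos = pos();
    int new_capacity =
        std::min(old_capacity * 2, old_capacity + kGrowthLimit);
    char* new_data = new char[new_capacity];
    std::memcpy(new_data, data_, old_pos);
    delete[] data_;
    data_ = new_data;
    cursor_ = new_data + old_pos;
    limit_ = new_data + new_capacity - kMaxEncodedSize;
    *cursor_++ = c;
  }

  char* data_;
  char* cursor_;
  char* limit_;
};

int main() {
  ByteBuffer buf;
  for (int i = 0; i < 5000; i++) buf.Add('x');  // forces several grows
  assert(buf.pos() == 5000);
  return 0;
}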
@@ -172,9 +171,10 @@ void Scanner::Init(Handle<String> source, unibrow::CharacterStream* stream,
ASSERT(kCharacterLookaheadBufferSize == 1);
Advance();
- // Skip initial whitespace (allowing HTML comment ends) and scan
- // first token.
- SkipWhiteSpace(true);
+ // Skip initial whitespace (allowing HTML comment ends just as after a
+ // newline) and scan the first token.
+ has_line_terminator_before_next_ = true;
+ SkipWhiteSpace();
Scan();
}
@@ -246,18 +246,19 @@ static inline bool IsByteOrderMark(uc32 c) {
}
-void Scanner::SkipWhiteSpace(bool initial) {
- has_line_terminator_before_next_ = initial;
+bool Scanner::SkipWhiteSpace() {
+ int start_position = source_pos();
while (true) {
// We treat byte-order marks (BOMs) as whitespace for better
// compatibility with Spidermonkey and other JavaScript engines.
while (kIsWhiteSpace.get(c0_) || IsByteOrderMark(c0_)) {
// IsWhiteSpace() includes line terminators!
- if (kIsLineTerminator.get(c0_))
+ if (kIsLineTerminator.get(c0_)) {
// Ignore line terminators, but remember them. This is necessary
// for automatic semicolon insertion.
has_line_terminator_before_next_ = true;
+ }
Advance();
}
@@ -279,7 +280,8 @@ void Scanner::SkipWhiteSpace(bool initial) {
}
PushBack('-'); // undo Advance()
}
- return;
+ // Return whether or not we skipped any characters.
+ return source_pos() != start_position;
}
}
@@ -296,7 +298,7 @@ Token::Value Scanner::SkipSingleLineComment() {
Advance();
}
- return Token::COMMENT;
+ return Token::WHITESPACE;
}
@@ -316,7 +318,7 @@ Token::Value Scanner::SkipMultiLineComment() {
// matches the behaviour of SpiderMonkey and KJS.
if (ch == '*' && c0_ == '/') {
c0_ = ' ';
- return Token::COMMENT;
+ return Token::WHITESPACE;
}
}
@@ -342,18 +344,238 @@ Token::Value Scanner::ScanHtmlComment() {
void Scanner::Scan() {
Token::Value token;
- bool has_line_terminator = false;
+ has_line_terminator_before_next_ = false;
do {
- SkipWhiteSpace(has_line_terminator);
-
- // Remember the line terminator in previous loop
- has_line_terminator = has_line_terminator_before_next();
-
// Remember the position of the next token
next_.location.beg_pos = source_pos();
- token = ScanToken();
- } while (token == Token::COMMENT);
+ switch (c0_) {
+ case ' ':
+ case '\t':
+ Advance();
+ token = Token::WHITESPACE;
+ break;
+
+ case '\n':
+ Advance();
+ has_line_terminator_before_next_ = true;
+ token = Token::WHITESPACE;
+ break;
+
+ case '"': case '\'':
+ token = ScanString();
+ break;
+
+ case '<':
+ // < <= << <<= <!--
+ Advance();
+ if (c0_ == '=') {
+ token = Select(Token::LTE);
+ } else if (c0_ == '<') {
+ token = Select('=', Token::ASSIGN_SHL, Token::SHL);
+ } else if (c0_ == '!') {
+ token = ScanHtmlComment();
+ } else {
+ token = Token::LT;
+ }
+ break;
+
+ case '>':
+ // > >= >> >>= >>> >>>=
+ Advance();
+ if (c0_ == '=') {
+ token = Select(Token::GTE);
+ } else if (c0_ == '>') {
+ // >> >>= >>> >>>=
+ Advance();
+ if (c0_ == '=') {
+ token = Select(Token::ASSIGN_SAR);
+ } else if (c0_ == '>') {
+ token = Select('=', Token::ASSIGN_SHR, Token::SHR);
+ } else {
+ token = Token::SAR;
+ }
+ } else {
+ token = Token::GT;
+ }
+ break;
+
+ case '=':
+ // = == ===
+ Advance();
+ if (c0_ == '=') {
+ token = Select('=', Token::EQ_STRICT, Token::EQ);
+ } else {
+ token = Token::ASSIGN;
+ }
+ break;
+
+ case '!':
+ // ! != !==
+ Advance();
+ if (c0_ == '=') {
+ token = Select('=', Token::NE_STRICT, Token::NE);
+ } else {
+ token = Token::NOT;
+ }
+ break;
+
+ case '+':
+ // + ++ +=
+ Advance();
+ if (c0_ == '+') {
+ token = Select(Token::INC);
+ } else if (c0_ == '=') {
+ token = Select(Token::ASSIGN_ADD);
+ } else {
+ token = Token::ADD;
+ }
+ break;
+
+ case '-':
+ // - -- --> -=
+ Advance();
+ if (c0_ == '-') {
+ Advance();
+ if (c0_ == '>' && has_line_terminator_before_next_) {
+ // For compatibility with SpiderMonkey, we skip lines that
+ // start with an HTML comment end '-->'.
+ token = SkipSingleLineComment();
+ } else {
+ token = Token::DEC;
+ }
+ } else if (c0_ == '=') {
+ token = Select(Token::ASSIGN_SUB);
+ } else {
+ token = Token::SUB;
+ }
+ break;
+
+ case '*':
+ // * *=
+ token = Select('=', Token::ASSIGN_MUL, Token::MUL);
+ break;
+
+ case '%':
+ // % %=
+ token = Select('=', Token::ASSIGN_MOD, Token::MOD);
+ break;
+
+ case '/':
+ // / // /* /=
+ Advance();
+ if (c0_ == '/') {
+ token = SkipSingleLineComment();
+ } else if (c0_ == '*') {
+ token = SkipMultiLineComment();
+ } else if (c0_ == '=') {
+ token = Select(Token::ASSIGN_DIV);
+ } else {
+ token = Token::DIV;
+ }
+ break;
+
+ case '&':
+ // & && &=
+ Advance();
+ if (c0_ == '&') {
+ token = Select(Token::AND);
+ } else if (c0_ == '=') {
+ token = Select(Token::ASSIGN_BIT_AND);
+ } else {
+ token = Token::BIT_AND;
+ }
+ break;
+
+ case '|':
+ // | || |=
+ Advance();
+ if (c0_ == '|') {
+ token = Select(Token::OR);
+ } else if (c0_ == '=') {
+ token = Select(Token::ASSIGN_BIT_OR);
+ } else {
+ token = Token::BIT_OR;
+ }
+ break;
+
+ case '^':
+ // ^ ^=
+ token = Select('=', Token::ASSIGN_BIT_XOR, Token::BIT_XOR);
+ break;
+
+ case '.':
+ // . Number
+ Advance();
+ if (IsDecimalDigit(c0_)) {
+ token = ScanNumber(true);
+ } else {
+ token = Token::PERIOD;
+ }
+ break;
+
+ case ':':
+ token = Select(Token::COLON);
+ break;
+
+ case ';':
+ token = Select(Token::SEMICOLON);
+ break;
+
+ case ',':
+ token = Select(Token::COMMA);
+ break;
+
+ case '(':
+ token = Select(Token::LPAREN);
+ break;
+
+ case ')':
+ token = Select(Token::RPAREN);
+ break;
+
+ case '[':
+ token = Select(Token::LBRACK);
+ break;
+
+ case ']':
+ token = Select(Token::RBRACK);
+ break;
+
+ case '{':
+ token = Select(Token::LBRACE);
+ break;
+
+ case '}':
+ token = Select(Token::RBRACE);
+ break;
+
+ case '?':
+ token = Select(Token::CONDITIONAL);
+ break;
+
+ case '~':
+ token = Select(Token::BIT_NOT);
+ break;
+
+ default:
+ if (kIsIdentifierStart.get(c0_)) {
+ token = ScanIdentifier();
+ } else if (IsDecimalDigit(c0_)) {
+ token = ScanNumber(false);
+ } else if (SkipWhiteSpace()) {
+ token = Token::WHITESPACE;
+ } else if (c0_ < 0) {
+ token = Token::EOS;
+ } else {
+ token = Select(Token::ILLEGAL);
+ }
+ break;
+ }
+
+ // Continue scanning for tokens as long as we're just skipping
+ // whitespace.
+ } while (token == Token::WHITESPACE);
next_.location.end_pos = source_pos();
next_.token = token;
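
With this change whitespace and comments are ordinary tokens of type
WHITESPACE, and the do-while above simply spins until something else turns
up; that is also why SkipSingleLineComment and SkipMultiLineComment now
return Token::WHITESPACE instead of Token::COMMENT. The control shape,
sketched over a plain string with an invented token enum:

#include <cctype>
#include <cstdio>
#include <string>

enum Token { WHITESPACE, IDENTIFIER, NUMBER, END };

struct MiniScanner {
  std::string src;
  size_t pos;

  Token ScanOne() {  // classify exactly one chunk of input
    if (pos >= src.size()) return END;
    unsigned char c = static_cast<unsigned char>(src[pos]);
    if (std::isspace(c)) { pos++; return WHITESPACE; }
    if (std::isdigit(c)) {
      while (pos < src.size() &&
             std::isdigit(static_cast<unsigned char>(src[pos]))) pos++;
      return NUMBER;
    }
    while (pos < src.size() &&
           !std::isspace(static_cast<unsigned char>(src[pos]))) pos++;
    return IDENTIFIER;
  }

  Token Next() {  // the Scan() loop shape from the diff
    Token token;
    do {
      token = ScanOne();
    } while (token == WHITESPACE);  // discard whitespace "tokens"
    return token;
  }
};

int main() {
  MiniScanner s = { "  foo   42 ", 0 };
  for (Token t = s.Next(); t != END; t = s.Next())
    std::printf("token %d\n", static_cast<int>(t));
  return 0;
}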
@@ -495,147 +717,6 @@ Token::Value Scanner::Select(uc32 next, Token::Value then, Token::Value else_) {
}
-Token::Value Scanner::ScanToken() {
- switch (c0_) {
- // strings
- case '"': case '\'':
- return ScanString();
-
- case '<':
- // < <= << <<= <!--
- Advance();
- if (c0_ == '=') return Select(Token::LTE);
- if (c0_ == '<') return Select('=', Token::ASSIGN_SHL, Token::SHL);
- if (c0_ == '!') return ScanHtmlComment();
- return Token::LT;
-
- case '>':
- // > >= >> >>= >>> >>>=
- Advance();
- if (c0_ == '=') return Select(Token::GTE);
- if (c0_ == '>') {
- // >> >>= >>> >>>=
- Advance();
- if (c0_ == '=') return Select(Token::ASSIGN_SAR);
- if (c0_ == '>') return Select('=', Token::ASSIGN_SHR, Token::SHR);
- return Token::SAR;
- }
- return Token::GT;
-
- case '=':
- // = == ===
- Advance();
- if (c0_ == '=') return Select('=', Token::EQ_STRICT, Token::EQ);
- return Token::ASSIGN;
-
- case '!':
- // ! != !==
- Advance();
- if (c0_ == '=') return Select('=', Token::NE_STRICT, Token::NE);
- return Token::NOT;
-
- case '+':
- // + ++ +=
- Advance();
- if (c0_ == '+') return Select(Token::INC);
- if (c0_ == '=') return Select(Token::ASSIGN_ADD);
- return Token::ADD;
-
- case '-':
- // - -- -=
- Advance();
- if (c0_ == '-') return Select(Token::DEC);
- if (c0_ == '=') return Select(Token::ASSIGN_SUB);
- return Token::SUB;
-
- case '*':
- // * *=
- return Select('=', Token::ASSIGN_MUL, Token::MUL);
-
- case '%':
- // % %=
- return Select('=', Token::ASSIGN_MOD, Token::MOD);
-
- case '/':
- // / // /* /=
- Advance();
- if (c0_ == '/') return SkipSingleLineComment();
- if (c0_ == '*') return SkipMultiLineComment();
- if (c0_ == '=') return Select(Token::ASSIGN_DIV);
- return Token::DIV;
-
- case '&':
- // & && &=
- Advance();
- if (c0_ == '&') return Select(Token::AND);
- if (c0_ == '=') return Select(Token::ASSIGN_BIT_AND);
- return Token::BIT_AND;
-
- case '|':
- // | || |=
- Advance();
- if (c0_ == '|') return Select(Token::OR);
- if (c0_ == '=') return Select(Token::ASSIGN_BIT_OR);
- return Token::BIT_OR;
-
- case '^':
- // ^ ^=
- return Select('=', Token::ASSIGN_BIT_XOR, Token::BIT_XOR);
-
- case '.':
- // . Number
- Advance();
- if (IsDecimalDigit(c0_)) return ScanNumber(true);
- return Token::PERIOD;
-
- case ':':
- return Select(Token::COLON);
-
- case ';':
- return Select(Token::SEMICOLON);
-
- case ',':
- return Select(Token::COMMA);
-
- case '(':
- return Select(Token::LPAREN);
-
- case ')':
- return Select(Token::RPAREN);
-
- case '[':
- return Select(Token::LBRACK);
-
- case ']':
- return Select(Token::RBRACK);
-
- case '{':
- return Select(Token::LBRACE);
-
- case '}':
- return Select(Token::RBRACE);
-
- case '?':
- return Select(Token::CONDITIONAL);
-
- case '~':
- return Select(Token::BIT_NOT);
-
- default:
- if (kIsIdentifierStart.get(c0_))
- return ScanIdentifier();
- if (IsDecimalDigit(c0_))
- return ScanNumber(false);
- if (c0_ < 0)
- return Token::EOS;
- return Select(Token::ILLEGAL);
- }
-
- UNREACHABLE();
- return Token::ILLEGAL;
-}
-
-
// Scans decimal digits; stops at the first non-digit character.
void Scanner::ScanDecimalDigits() {
while (IsDecimalDigit(c0_))
@@ -734,7 +815,6 @@ uc32 Scanner::ScanIdentifierUnicodeEscape() {
Token::Value Scanner::ScanIdentifier() {
ASSERT(kIsIdentifierStart.get(c0_));
-
bool has_escapes = false;
StartLiteral();
@@ -746,8 +826,10 @@ Token::Value Scanner::ScanIdentifier() {
if (!kIsIdentifierStart.get(c)) return Token::ILLEGAL;
AddChar(c);
} else {
- AddCharAdvance();
+ AddChar(c0_);
+ Advance();
}
+
// Scan the rest of the identifier characters.
while (kIsIdentifierPart.get(c0_)) {
if (c0_ == '\\') {
@@ -757,19 +839,22 @@ Token::Value Scanner::ScanIdentifier() {
if (!kIsIdentifierPart.get(c)) return Token::ILLEGAL;
AddChar(c);
} else {
- AddCharAdvance();
+ AddChar(c0_);
+ Advance();
}
}
TerminateLiteral();
// We don't have any 1-letter keywords (this is probably a common case).
- if ((next_.literal_end - next_.literal_pos) == 1)
+ if ((next_.literal_end - next_.literal_pos) == 1) {
return Token::IDENTIFIER;
+ }
// If the identifier contains unicode escapes, it must not be
// resolved to a keyword.
- if (has_escapes)
+ if (has_escapes) {
return Token::IDENTIFIER;
+ }
return Token::Lookup(&literals_.data()[next_.literal_pos]);
}
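
ScanIdentifier only consults the keyword table when the literal is longer
than one character and contains no escapes; ECMA-262 requires that an
identifier spelled with unicode escapes never resolves to the keyword it
spells out. A sketch of that gate (the lookup here is a stand-in for V8's
generated keyword table):

#include <cassert>
#include <string>

enum Token { IDENTIFIER, KEYWORD_IF };

// Illustrative keyword lookup; V8 uses a generated table.
static Token Lookup(const std::string& literal) {
  return literal == "if" ? KEYWORD_IF : IDENTIFIER;
}

static Token ClassifyIdentifier(const std::string& literal,
                                bool has_escapes) {
  if (literal.size() == 1) return IDENTIFIER;  // no 1-letter keywords
  if (has_escapes) return IDENTIFIER;  // escaped spellings never match
  return Lookup(literal);
}

int main() {
  assert(ClassifyIdentifier("if", false) == KEYWORD_IF);
  assert(ClassifyIdentifier("if", true) == IDENTIFIER);  // e.g. \u0069f
  assert(ClassifyIdentifier("x", false) == IDENTIFIER);
  return 0;
}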
diff --git a/deps/v8/src/scanner.h b/deps/v8/src/scanner.h
index 79a4a4c24..eea23a70c 100644
--- a/deps/v8/src/scanner.h
+++ b/deps/v8/src/scanner.h
@@ -31,7 +31,8 @@
#include "token.h"
#include "char-predicates-inl.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
class UTF8Buffer {
@@ -39,16 +40,33 @@ class UTF8Buffer {
UTF8Buffer();
~UTF8Buffer();
- void Initialize(char* src, int length);
- void AddChar(uc32 c);
- void Reset() { pos_ = 0; }
- int pos() const { return pos_; }
+ void AddChar(uc32 c) {
+ if (cursor_ <= limit_ &&
+ static_cast<unsigned>(c) <= unibrow::Utf8::kMaxOneByteChar) {
+ *cursor_++ = static_cast<char>(c);
+ } else {
+ AddCharSlow(c);
+ }
+ }
+
+ void Reset() { cursor_ = data_; }
+ int pos() const { return cursor_ - data_; }
char* data() const { return data_; }
private:
char* data_;
- int size_;
- int pos_;
+ char* cursor_;
+ char* limit_;
+
+ int Capacity() const {
+ return (limit_ - data_) + unibrow::Utf8::kMaxEncodedSize;
+ }
+
+ static char* ComputeLimit(char* data, int capacity) {
+ return (data + capacity) - unibrow::Utf8::kMaxEncodedSize;
+ }
+
+ void AddCharSlow(uc32 c);
};
@@ -204,7 +222,7 @@ class Scanner {
void Advance();
void PushBack(uc32 ch);
- void SkipWhiteSpace(bool initial);
+ bool SkipWhiteSpace();
Token::Value SkipSingleLineComment();
Token::Value SkipMultiLineComment();
@@ -212,7 +230,6 @@ class Scanner {
inline Token::Value Select(uc32 next, Token::Value then, Token::Value else_);
void Scan();
- Token::Value ScanToken();
void ScanDecimalDigits();
Token::Value ScanNumber(bool seen_period);
Token::Value ScanIdentifier();
diff --git a/deps/v8/src/scopeinfo.cc b/deps/v8/src/scopeinfo.cc
index 6d2ade815..fedfbd64f 100644
--- a/deps/v8/src/scopeinfo.cc
+++ b/deps/v8/src/scopeinfo.cc
@@ -32,7 +32,8 @@
#include "scopeinfo.h"
#include "scopes.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
static int CompareLocal(Variable* const* v, Variable* const* w) {
@@ -566,5 +567,6 @@ void ScopeInfo<Allocator>::Print() {
// Make sure the classes get instantiated by the template system.
template class ScopeInfo<FreeStoreAllocationPolicy>;
template class ScopeInfo<PreallocatedStorage>;
+template class ScopeInfo<ZoneListAllocationPolicy>;
} } // namespace v8::internal
diff --git a/deps/v8/src/scopeinfo.h b/deps/v8/src/scopeinfo.h
index dbe235ad0..a097d34f9 100644
--- a/deps/v8/src/scopeinfo.h
+++ b/deps/v8/src/scopeinfo.h
@@ -30,7 +30,8 @@
#include "variables.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Scope information represents information about a function's
// scopes (currently only one, because we don't do any inlining)
@@ -150,6 +151,18 @@ class ScopeInfo BASE_EMBEDDED {
List<Variable::Mode, Allocator > context_modes_;
};
+class ZoneScopeInfo: public ScopeInfo<ZoneListAllocationPolicy> {
+ public:
+ // Create a ZoneScopeInfo instance from a scope.
+ explicit ZoneScopeInfo(Scope* scope)
+ : ScopeInfo<ZoneListAllocationPolicy>(scope) {}
+
+ // Create a ZoneScopeInfo instance from a Code object.
+ explicit ZoneScopeInfo(Code* code)
+ : ScopeInfo<ZoneListAllocationPolicy>(code) {}
+};
+
+
} } // namespace v8::internal
#endif // V8_SCOPEINFO_H_
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index e959f0252..7122eb03c 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -31,7 +31,8 @@
#include "scopeinfo.h"
#include "scopes.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// ----------------------------------------------------------------------------
// A Zone allocator for use with LocalsMap.
@@ -112,7 +113,7 @@ Scope::Scope()
locals_(false),
temps_(0),
params_(0),
- nonlocals_(0),
+ dynamics_(NULL),
unresolved_(0),
decls_(0) {
}
@@ -123,10 +124,9 @@ Scope::Scope(Scope* outer_scope, Type type)
inner_scopes_(4),
type_(type),
scope_name_(Factory::empty_symbol()),
- locals_(),
temps_(4),
params_(4),
- nonlocals_(4),
+ dynamics_(NULL),
unresolved_(16),
decls_(4),
receiver_(NULL),
@@ -302,6 +302,8 @@ template void Scope::CollectUsedVariables(
List<Variable*, FreeStoreAllocationPolicy>* locals);
template void Scope::CollectUsedVariables(
List<Variable*, PreallocatedStorage>* locals);
+template void Scope::CollectUsedVariables(
+ List<Variable*, ZoneListAllocationPolicy>* locals);
void Scope::AllocateVariables(Handle<Context> context) {
@@ -405,6 +407,14 @@ static void PrintVar(PrettyPrinter* printer, int indent, Variable* var) {
}
+static void PrintMap(PrettyPrinter* printer, int indent, LocalsMap* map) {
+ for (LocalsMap::Entry* p = map->Start(); p != NULL; p = map->Next(p)) {
+ Variable* var = reinterpret_cast<Variable*>(p->value);
+ PrintVar(printer, indent, var);
+ }
+}
+
+
void Scope::Print(int n) {
int n0 = (n > 0 ? n : 0);
int n1 = n0 + 2; // indentation
@@ -465,14 +475,14 @@ void Scope::Print(int n) {
}
Indent(n1, "// local vars\n");
- for (LocalsMap::Entry* p = locals_.Start(); p != NULL; p = locals_.Next(p)) {
- Variable* var = reinterpret_cast<Variable*>(p->value);
- PrintVar(&printer, n1, var);
- }
+ PrintMap(&printer, n1, &locals_);
- Indent(n1, "// nonlocal vars\n");
- for (int i = 0; i < nonlocals_.length(); i++)
- PrintVar(&printer, n1, nonlocals_[i]);
+ Indent(n1, "// dynamic vars\n");
+ if (dynamics_ != NULL) {
+ PrintMap(&printer, n1, dynamics_->GetMap(Variable::DYNAMIC));
+ PrintMap(&printer, n1, dynamics_->GetMap(Variable::DYNAMIC_LOCAL));
+ PrintMap(&printer, n1, dynamics_->GetMap(Variable::DYNAMIC_GLOBAL));
+ }
// Print inner scopes (disable by providing negative n).
if (n >= 0) {
@@ -488,22 +498,15 @@ void Scope::Print(int n) {
Variable* Scope::NonLocal(Handle<String> name, Variable::Mode mode) {
- // Space optimization: reuse existing non-local with the same name
- // and mode.
- for (int i = 0; i < nonlocals_.length(); i++) {
- Variable* var = nonlocals_[i];
- if (var->name().is_identical_to(name) && var->mode() == mode) {
- return var;
- }
+ if (dynamics_ == NULL) dynamics_ = new DynamicScopePart();
+ LocalsMap* map = dynamics_->GetMap(mode);
+ Variable* var = map->Lookup(name);
+ if (var == NULL) {
+ // Declare a new non-local.
+ var = map->Declare(NULL, name, mode, true, false);
+ // Allocate it by giving it a dynamic lookup.
+ var->rewrite_ = new Slot(var, Slot::LOOKUP, -1);
}
-
- // Otherwise create a new non-local and add it to the list.
- Variable* var = new Variable(NULL, name, mode, true, false);
- nonlocals_.Add(var);
-
- // Allocate it by giving it a dynamic lookup.
- var->rewrite_ = new Slot(var, Slot::LOOKUP, -1);
-
return var;
}
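
Scope::NonLocal now declares each dynamic variable at most once per
(name, mode) pair through a per-mode hash map, instead of linearly scanning
the old nonlocals_ list, and the maps sit in a lazily allocated
DynamicScopePart (defined in scopes.h below) so scopes without eval or with
pay nothing. A hedged sketch of the layout, using std::map in place of
LocalsMap and assuming the three DYNAMIC* modes are consecutive enum values,
as the GetMap index arithmetic requires:

#include <cassert>
#include <map>
#include <string>

// Illustrative ordering; only consecutiveness of the DYNAMIC* values
// matters for the index arithmetic.
enum Mode { DYNAMIC, DYNAMIC_GLOBAL, DYNAMIC_LOCAL, VAR };

struct DynamicScopePart {
  std::map<std::string, int>* GetMap(Mode mode) {
    int index = mode - DYNAMIC;  // relies on consecutive enum values
    assert(index >= 0 && index < 3);
    return &maps_[index];
  }
 private:
  std::map<std::string, int> maps_[3];
};

struct Scope {
  DynamicScopePart* dynamics_ = nullptr;  // allocated only when needed

  int* NonLocal(const std::string& name, Mode mode) {
    if (dynamics_ == nullptr) dynamics_ = new DynamicScopePart();
    // operator[] declares the entry on first lookup, like map->Declare.
    return &(*dynamics_->GetMap(mode))[name];
  }
};

int main() {
  Scope scope;
  *scope.NonLocal("x", DYNAMIC_GLOBAL) = 1;
  assert(*scope.NonLocal("x", DYNAMIC_GLOBAL) == 1);  // same variable reused
  delete scope.dynamics_;
  return 0;
}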
@@ -617,14 +620,6 @@ void Scope::ResolveVariable(Scope* global_scope,
ASSERT(global_scope != NULL);
var = new Variable(global_scope, proxy->name(),
Variable::DYNAMIC, true, false);
- // Ideally we simply rewrite these variables into property
- // accesses. Unfortunately, we cannot do this here at the
- // moment because then we can't differentiate between
- // global variable ('x') and global property ('this.x') access.
- // If 'x' doesn't exist, the former leads to an error, while the
- // latter returns undefined. Sigh...
- // var->rewrite_ = new Property(new Literal(env_->global()),
- // new Literal(proxy->name()));
} else if (scope_inside_with_) {
// If we are inside a with statement we give up and look up
diff --git a/deps/v8/src/scopes.h b/deps/v8/src/scopes.h
index e78bd2a2c..b2f61ef66 100644
--- a/deps/v8/src/scopes.h
+++ b/deps/v8/src/scopes.h
@@ -31,11 +31,11 @@
#include "ast.h"
#include "hashmap.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// A hash map to support fast local variable declaration and lookup.
-
class LocalsMap: public HashMap {
public:
LocalsMap();
@@ -53,6 +53,23 @@ class LocalsMap: public HashMap {
};
+// The dynamic scope part holds hash maps for the variables that will
+// be looked up dynamically from within eval and with scopes. The objects
+// are allocated on-demand from Scope::NonLocal to avoid wasting memory
+// and setup time for scopes that don't need them.
+class DynamicScopePart : public ZoneObject {
+ public:
+ LocalsMap* GetMap(Variable::Mode mode) {
+ int index = mode - Variable::DYNAMIC;
+ ASSERT(index >= 0 && index < 3);
+ return &maps_[index];
+ }
+
+ private:
+ LocalsMap maps_[3];
+};
+
+
// Global invariants after AST construction: Each reference (i.e. identifier)
// to a JavaScript variable (including global properties) is represented by a
// VariableProxy node. Immediately after AST construction and before variable
@@ -278,7 +295,7 @@ class Scope: public ZoneObject {
// parameter list in source order
ZoneList<Variable*> params_;
// variables that must be looked up dynamically
- ZoneList<Variable*> nonlocals_;
+ DynamicScopePart* dynamics_;
// unresolved variables referred to from this scope
ZoneList<VariableProxy*> unresolved_;
// declarations
diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc
index 62287bc0d..fb66d2785 100644
--- a/deps/v8/src/serialize.cc
+++ b/deps/v8/src/serialize.cc
@@ -39,7 +39,8 @@
#include "stub-cache.h"
#include "v8threads.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Encoding: a RelativeAddress must be able to fit in a pointer:
// it is encoded as an Address with (from MS to LS bits):
diff --git a/deps/v8/src/serialize.h b/deps/v8/src/serialize.h
index f6594aca8..7f4eb6321 100644
--- a/deps/v8/src/serialize.h
+++ b/deps/v8/src/serialize.h
@@ -30,7 +30,8 @@
#include "hashmap.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// A TypeCode is used to distinguish different kinds of external reference.
// It is a single bit to make testing for types easy.
diff --git a/deps/v8/src/shell.h b/deps/v8/src/shell.h
index 671245128..ca510408c 100644
--- a/deps/v8/src/shell.h
+++ b/deps/v8/src/shell.h
@@ -32,7 +32,8 @@
#include "../public/debug.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Debug event handler for interactive debugging.
void handle_debug_event(v8::DebugEvent event,
diff --git a/deps/v8/src/smart-pointer.h b/deps/v8/src/smart-pointer.h
index c39df1686..0fa8224e7 100644
--- a/deps/v8/src/smart-pointer.h
+++ b/deps/v8/src/smart-pointer.h
@@ -28,7 +28,8 @@
#ifndef V8_SMART_POINTER_H_
#define V8_SMART_POINTER_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// A 'scoped array pointer' that calls DeleteArray on its pointer when the
diff --git a/deps/v8/src/snapshot-common.cc b/deps/v8/src/snapshot-common.cc
index 3aa1caec0..9c66a5037 100644
--- a/deps/v8/src/snapshot-common.cc
+++ b/deps/v8/src/snapshot-common.cc
@@ -33,7 +33,8 @@
#include "serialize.h"
#include "snapshot.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
bool Snapshot::Deserialize(const byte* content, int len) {
Deserializer des(content, len);
diff --git a/deps/v8/src/snapshot-empty.cc b/deps/v8/src/snapshot-empty.cc
index d4cda19c2..60ab1e564 100644
--- a/deps/v8/src/snapshot-empty.cc
+++ b/deps/v8/src/snapshot-empty.cc
@@ -31,7 +31,8 @@
#include "snapshot.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
const byte Snapshot::data_[] = { 0 };
int Snapshot::size_ = 0;
diff --git a/deps/v8/src/snapshot.h b/deps/v8/src/snapshot.h
index b3f23d3f7..88ba8db30 100644
--- a/deps/v8/src/snapshot.h
+++ b/deps/v8/src/snapshot.h
@@ -28,7 +28,8 @@
#ifndef V8_SNAPSHOT_H_
#define V8_SNAPSHOT_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
class Snapshot {
public:
diff --git a/deps/v8/src/spaces-inl.h b/deps/v8/src/spaces-inl.h
index 397365847..2f01164f8 100644
--- a/deps/v8/src/spaces-inl.h
+++ b/deps/v8/src/spaces-inl.h
@@ -31,7 +31,8 @@
#include "memory.h"
#include "spaces.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// -----------------------------------------------------------------------------
@@ -92,8 +93,10 @@ Address Page::AllocationTop() {
void Page::ClearRSet() {
+#ifndef V8_HOST_ARCH_64_BIT
// This method can be called in all rset states.
memset(RSetStart(), 0, kRSetEndOffset - kRSetStartOffset);
+#endif
}
@@ -157,9 +160,14 @@ void Page::UnsetRSet(Address address, int offset) {
bool Page::IsRSetSet(Address address, int offset) {
+#ifdef V8_HOST_ARCH_64_BIT
+ // TODO(X64): Reenable when RSet works.
+ return true;
+#else // V8_HOST_ARCH_64_BIT
uint32_t bitmask = 0;
Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
return (Memory::uint32_at(rset_address) & bitmask) != 0;
+#endif // V8_HOST_ARCH_64_BIT
}
@@ -194,7 +202,7 @@ bool MemoryAllocator::IsPageInSpace(Page* p, PagedSpace* space) {
Page* MemoryAllocator::GetNextPage(Page* p) {
ASSERT(p->is_valid());
- int raw_addr = p->opaque_header & ~Page::kPageAlignmentMask;
+ intptr_t raw_addr = p->opaque_header & ~Page::kPageAlignmentMask;
return Page::FromAddress(AddressFrom<Address>(raw_addr));
}
@@ -207,7 +215,7 @@ int MemoryAllocator::GetChunkId(Page* p) {
void MemoryAllocator::SetNextPage(Page* prev, Page* next) {
ASSERT(prev->is_valid());
- int chunk_id = prev->opaque_header & Page::kPageAlignmentMask;
+ int chunk_id = GetChunkId(prev);
ASSERT_PAGE_ALIGNED(next->address());
prev->opaque_header = OffsetFrom(next->address()) | chunk_id;
}
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index f15af9e72..72b028cde 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -31,7 +31,8 @@
#include "mark-compact.h"
#include "platform.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
@@ -121,6 +122,15 @@ PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
stop_page_ = space->MCRelocationTopPage();
break;
case ALL_PAGES:
+#ifdef DEBUG
+ // Verify that the cached last page in the space is actually the
+ // last page.
+ for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) {
+ if (!p->next_page()->is_valid()) {
+ ASSERT(space->last_page_ == p);
+ }
+ }
+#endif
stop_page_ = space->last_page_;
break;
default:
@@ -731,6 +741,7 @@ void PagedSpace::Shrink() {
// Since pages are only freed in whole chunks, we may have kept more
// than pages_to_keep. Count the extra pages and cache the new last
// page in the space.
+ last_page_ = last_page_to_keep;
while (p->is_valid()) {
pages_to_keep++;
last_page_ = p;
@@ -1321,6 +1332,13 @@ int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
FreeListNode* node = FreeListNode::FromAddress(start);
node->set_size(size_in_bytes);
+ // We don't use the freelists in compacting mode. This makes it more like a
+ // GC that only has mark-sweep-compact and doesn't have a mark-sweep
+ // collector.
+ if (FLAG_always_compact) {
+ return size_in_bytes;
+ }
+
// Early return to drop too-small blocks on the floor (one or two word
// blocks cannot hold a map pointer, a size field, and a pointer to the
// next block in the free list).
@@ -1352,6 +1370,7 @@ Object* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
if ((free_[index].head_node_ = node->next()) == NULL) RemoveSize(index);
available_ -= size_in_bytes;
*wasted_bytes = 0;
+ ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
return node;
}
// Search the size list for the best fit.
@@ -1363,6 +1382,7 @@ Object* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
*wasted_bytes = 0;
return Failure::RetryAfterGC(size_in_bytes, owner_);
}
+ ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
int rem = cur - index;
int rem_bytes = rem << kPointerSizeLog2;
FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
@@ -1443,6 +1463,7 @@ void MapSpaceFreeList::Free(Address start) {
Memory::Address_at(start + i) = kZapValue;
}
#endif
+ ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
FreeListNode* node = FreeListNode::FromAddress(start);
node->set_size(Map::kSize);
node->set_next(head_);
@@ -1456,6 +1477,7 @@ Object* MapSpaceFreeList::Allocate() {
return Failure::RetryAfterGC(Map::kSize, owner_);
}
+ ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
FreeListNode* node = FreeListNode::FromAddress(head_);
head_ = node->next();
available_ -= Map::kSize;
@@ -2412,6 +2434,13 @@ void LargeObjectSpace::ClearRSet() {
void LargeObjectSpace::IterateRSet(ObjectSlotCallback copy_object_func) {
ASSERT(Page::is_rset_in_use());
+ static void* lo_rset_histogram = StatsTable::CreateHistogram(
+ "V8.RSetLO",
+ 0,
+ // Keeping this histogram's buckets the same as the paged space histogram.
+ Page::kObjectAreaSize / kPointerSize,
+ 30);
+
LargeObjectIterator it(this);
while (it.has_next()) {
// We only have code, sequential strings, or fixed arrays in large
@@ -2422,15 +2451,18 @@ void LargeObjectSpace::IterateRSet(ObjectSlotCallback copy_object_func) {
// Iterate the normal page remembered set range.
Page* page = Page::FromAddress(object->address());
Address object_end = object->address() + object->Size();
- Heap::IterateRSetRange(page->ObjectAreaStart(),
- Min(page->ObjectAreaEnd(), object_end),
- page->RSetStart(),
- copy_object_func);
+ int count = Heap::IterateRSetRange(page->ObjectAreaStart(),
+ Min(page->ObjectAreaEnd(), object_end),
+ page->RSetStart(),
+ copy_object_func);
// Iterate the extra array elements.
if (object_end > page->ObjectAreaEnd()) {
- Heap::IterateRSetRange(page->ObjectAreaEnd(), object_end,
- object_end, copy_object_func);
+ count += Heap::IterateRSetRange(page->ObjectAreaEnd(), object_end,
+ object_end, copy_object_func);
+ }
+ if (lo_rset_histogram != NULL) {
+ StatsTable::AddHistogramSample(lo_rset_histogram, count);
}
}
}
diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h
index e8504a427..a62b0a8d3 100644
--- a/deps/v8/src/spaces.h
+++ b/deps/v8/src/spaces.h
@@ -31,7 +31,8 @@
#include "list-inl.h"
#include "log.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// -----------------------------------------------------------------------------
// Heap structures:
@@ -98,6 +99,7 @@ class AllocationInfo;
// its page offset by 32. Therefore, the object area in a page starts at the
// 256th byte (8K/32). Bytes 0 to 255 do not need the remembered set, so that
// the first two words (64 bits) in a page can be used for other purposes.
+// TODO(X64): This description only represents the 32-bit layout.
//
// The mark-compact collector transforms a map pointer into a page index and a
// page offset. The map space can have up to 1024 pages, and 8M bytes (1024 *
@@ -213,7 +215,7 @@ class Page {
static const int kPageSize = 1 << kPageSizeBits;
// Page size mask.
- static const int kPageAlignmentMask = (1 << kPageSizeBits) - 1;
+ static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
// The end offset of the remembered set in a page
// (heaps are aligned to pointer size).
@@ -242,7 +244,7 @@ class Page {
// in the current page. If a page is in the large object space, the first
// word *may* (if the page start and large object chunk start are the
// same) contain the address of the next large object chunk.
- int opaque_header;
+ intptr_t opaque_header;
// If the page is not in the large object space, the low-order bit of the
// second word is set. If the page is in the large object space, the
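
The opaque_header change from int to intptr_t matters because the field
packs a page-aligned address together with a chunk id in its low bits; on a
64-bit host an int would truncate the address, which is also why
kPageAlignmentMask becomes an intptr_t above. A standalone sketch of the
mask arithmetic, with an illustrative 13-bit (8K) page size:

#include <cassert>
#include <cstdint>

static const int kPageSizeBits = 13;  // 8K pages, as described above
static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

// Pack a page-aligned address with a small id in the low bits.
static intptr_t PackHeader(intptr_t page_address, int chunk_id) {
  assert((page_address & kPageAlignmentMask) == 0);  // address is aligned
  assert((chunk_id & ~kPageAlignmentMask) == 0);     // id fits in low bits
  return page_address | chunk_id;
}

int main() {
  intptr_t addr = static_cast<intptr_t>(1) << 20;
  intptr_t header = PackHeader(addr, 42);
  assert((header & ~kPageAlignmentMask) == addr);  // recover the address
  assert((header & kPageAlignmentMask) == 42);     // recover the chunk id
  return 0;
}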
diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc
index 2e0912fbd..44ba29746 100644
--- a/deps/v8/src/string-stream.cc
+++ b/deps/v8/src/string-stream.cc
@@ -30,7 +30,8 @@
#include "factory.h"
#include "string-stream.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
static const int kMentionedObjectCacheMaxSize = 256;
static List<HeapObject*, PreallocatedStorage>* debug_object_cache = NULL;
diff --git a/deps/v8/src/string-stream.h b/deps/v8/src/string-stream.h
index fa20064e4..15a72e0f3 100644
--- a/deps/v8/src/string-stream.h
+++ b/deps/v8/src/string-stream.h
@@ -28,7 +28,8 @@
#ifndef V8_STRING_STREAM_H_
#define V8_STRING_STREAM_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
class StringAllocator {
diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js
index 0bcabc943..df1f393e0 100644
--- a/deps/v8/src/string.js
+++ b/deps/v8/src/string.js
@@ -120,20 +120,26 @@ function StringIndexOf(searchString /* position */) { // length == 1
// ECMA-262 section 15.5.4.8
function StringLastIndexOf(searchString /* position */) { // length == 1
var sub = ToString(this);
+ var subLength = sub.length;
var pat = ToString(searchString);
- var index = (%_ArgumentsLength() > 1)
- ? ToNumber(%_Arguments(1) /* position */)
- : $NaN;
- var firstIndex;
- if ($isNaN(index)) {
- firstIndex = sub.length - pat.length;
- } else {
- firstIndex = TO_INTEGER(index);
- if (firstIndex + pat.length > sub.length) {
- firstIndex = sub.length - pat.length;
+ var patLength = pat.length;
+ var index = subLength - patLength;
+ if (%_ArgumentsLength() > 1) {
+ var position = ToNumber(%_Arguments(1));
+ if (!$isNaN(position)) {
+ position = TO_INTEGER(position);
+ if (position < 0) {
+ position = 0;
+ }
+ if (position + patLength < subLength) {
+ index = position;
+ }
}
}
- return %StringLastIndexOf(sub, pat, firstIndex);
+ if (index < 0) {
+ return -1;
+ }
+ return %StringLastIndexOf(sub, pat, index);
}
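
The rewritten StringLastIndexOf starts from subLength - patLength and only
moves the start left when the caller passes an explicit finite position,
clamping negatives to zero and ignoring positions at or beyond the last
feasible start. The same clamping mirrored in C++ over std::string
(illustrative only; the real code defers to %StringLastIndexOf):

#include <cassert>
#include <string>

// has_position == false models "no position argument was passed".
int LastIndexOf(const std::string& sub, const std::string& pat,
                long position = 0, bool has_position = false) {
  long sub_length = static_cast<long>(sub.size());
  long pat_length = static_cast<long>(pat.size());
  long index = sub_length - pat_length;
  if (has_position) {
    if (position < 0) position = 0;            // clamp negatives
    if (position + pat_length < sub_length) {
      index = position;                        // only ever move left
    }
  }
  if (index < 0) return -1;                    // pattern longer than subject
  size_t found = sub.rfind(pat, static_cast<size_t>(index));
  return found == std::string::npos ? -1 : static_cast<int>(found);
}

int main() {
  assert(LastIndexOf("abcabc", "abc") == 3);            // default: last match
  assert(LastIndexOf("abcabc", "abc", 1, true) == 0);   // match at or before 1
  assert(LastIndexOf("abcabc", "abc", -5, true) == 0);  // clamped to 0
  assert(LastIndexOf("ab", "abc") == -1);
  return 0;
}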
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index 6811fd2e7..f7e5456ef 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -32,7 +32,8 @@
#include "ic-inl.h"
#include "stub-cache.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// -----------------------------------------------------------------------
// StubCache implementation.
@@ -369,6 +370,7 @@ Object* StubCache::ComputeKeyedStoreField(String* name, JSObject* receiver,
Object* StubCache::ComputeCallConstant(int argc,
+ InLoopFlag in_loop,
String* name,
Object* object,
JSObject* holder,
@@ -387,7 +389,10 @@ Object* StubCache::ComputeCallConstant(int argc,
}
Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::CALL_IC, CONSTANT_FUNCTION, argc);
+ Code::ComputeMonomorphicFlags(Code::CALL_IC,
+ CONSTANT_FUNCTION,
+ in_loop,
+ argc);
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
if (object->IsJSObject()) {
@@ -405,7 +410,7 @@ Object* StubCache::ComputeCallConstant(int argc,
if (!function->is_compiled()) return Failure::InternalError();
// Compile the stub - only create stubs for fully compiled functions.
CallStubCompiler compiler(argc);
- code = compiler.CompileCallConstant(object, holder, function, check);
+ code = compiler.CompileCallConstant(object, holder, function, check, flags);
if (code->IsFailure()) return code;
LOG(CodeCreateEvent("CallIC", Code::cast(code), name));
Object* result = map->UpdateCodeCache(name, Code::cast(code));
@@ -416,6 +421,7 @@ Object* StubCache::ComputeCallConstant(int argc,
Object* StubCache::ComputeCallField(int argc,
+ InLoopFlag in_loop,
String* name,
Object* object,
JSObject* holder,
@@ -430,11 +436,14 @@ Object* StubCache::ComputeCallField(int argc,
object = holder;
}
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC, FIELD, argc);
+ Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC,
+ FIELD,
+ in_loop,
+ argc);
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
CallStubCompiler compiler(argc);
- code = compiler.CompileCallField(object, holder, index, name);
+ code = compiler.CompileCallField(object, holder, index, name, flags);
if (code->IsFailure()) return code;
LOG(CodeCreateEvent("CallIC", Code::cast(code), name));
Object* result = map->UpdateCodeCache(name, Code::cast(code));
@@ -460,7 +469,10 @@ Object* StubCache::ComputeCallInterceptor(int argc,
}
Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::CALL_IC, INTERCEPTOR, argc);
+ Code::ComputeMonomorphicFlags(Code::CALL_IC,
+ INTERCEPTOR,
+ NOT_IN_LOOP,
+ argc);
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
CallStubCompiler compiler(argc);
@@ -475,9 +487,10 @@ Object* StubCache::ComputeCallInterceptor(int argc,
Object* StubCache::ComputeCallNormal(int argc,
+ InLoopFlag in_loop,
String* name,
JSObject* receiver) {
- Object* code = ComputeCallNormal(argc);
+ Object* code = ComputeCallNormal(argc, in_loop);
if (code->IsFailure()) return code;
return Set(name, receiver->map(), Code::cast(code));
}
@@ -522,9 +535,9 @@ static Object* FillCache(Object* code) {
}
-Code* StubCache::FindCallInitialize(int argc) {
+Code* StubCache::FindCallInitialize(int argc, InLoopFlag in_loop) {
Code::Flags flags =
- Code::ComputeFlags(Code::CALL_IC, UNINITIALIZED, NORMAL, argc);
+ Code::ComputeFlags(Code::CALL_IC, in_loop, UNINITIALIZED, NORMAL, argc);
Object* result = ProbeCache(flags);
ASSERT(!result->IsUndefined());
// This might be called during the marking phase of the collector
@@ -533,9 +546,9 @@ Code* StubCache::FindCallInitialize(int argc) {
}
-Object* StubCache::ComputeCallInitialize(int argc) {
+Object* StubCache::ComputeCallInitialize(int argc, InLoopFlag in_loop) {
Code::Flags flags =
- Code::ComputeFlags(Code::CALL_IC, UNINITIALIZED, NORMAL, argc);
+ Code::ComputeFlags(Code::CALL_IC, in_loop, UNINITIALIZED, NORMAL, argc);
Object* probe = ProbeCache(flags);
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
@@ -543,20 +556,9 @@ Object* StubCache::ComputeCallInitialize(int argc) {
}
-Object* StubCache::ComputeCallInitializeInLoop(int argc) {
+Object* StubCache::ComputeCallPreMonomorphic(int argc, InLoopFlag in_loop) {
Code::Flags flags =
- Code::ComputeFlags(Code::CALL_IC, UNINITIALIZED_IN_LOOP, NORMAL, argc);
- Object* probe = ProbeCache(flags);
- if (!probe->IsUndefined()) return probe;
- StubCompiler compiler;
- return FillCache(compiler.CompileCallInitialize(flags));
-}
-
-
-
-Object* StubCache::ComputeCallPreMonomorphic(int argc) {
- Code::Flags flags =
- Code::ComputeFlags(Code::CALL_IC, PREMONOMORPHIC, NORMAL, argc);
+ Code::ComputeFlags(Code::CALL_IC, in_loop, PREMONOMORPHIC, NORMAL, argc);
Object* probe = ProbeCache(flags);
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
@@ -564,9 +566,9 @@ Object* StubCache::ComputeCallPreMonomorphic(int argc) {
}
-Object* StubCache::ComputeCallNormal(int argc) {
+Object* StubCache::ComputeCallNormal(int argc, InLoopFlag in_loop) {
Code::Flags flags =
- Code::ComputeFlags(Code::CALL_IC, MONOMORPHIC, NORMAL, argc);
+ Code::ComputeFlags(Code::CALL_IC, in_loop, MONOMORPHIC, NORMAL, argc);
Object* probe = ProbeCache(flags);
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
@@ -574,9 +576,9 @@ Object* StubCache::ComputeCallNormal(int argc) {
}
-Object* StubCache::ComputeCallMegamorphic(int argc) {
+Object* StubCache::ComputeCallMegamorphic(int argc, InLoopFlag in_loop) {
Code::Flags flags =
- Code::ComputeFlags(Code::CALL_IC, MEGAMORPHIC, NORMAL, argc);
+ Code::ComputeFlags(Code::CALL_IC, in_loop, MEGAMORPHIC, NORMAL, argc);
Object* probe = ProbeCache(flags);
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
@@ -586,7 +588,7 @@ Object* StubCache::ComputeCallMegamorphic(int argc) {
Object* StubCache::ComputeCallMiss(int argc) {
Code::Flags flags =
- Code::ComputeFlags(Code::STUB, MEGAMORPHIC, NORMAL, argc);
+ Code::ComputeFlags(Code::STUB, NOT_IN_LOOP, MEGAMORPHIC, NORMAL, argc);
Object* probe = ProbeCache(flags);
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
@@ -597,7 +599,7 @@ Object* StubCache::ComputeCallMiss(int argc) {
#ifdef ENABLE_DEBUGGER_SUPPORT
Object* StubCache::ComputeCallDebugBreak(int argc) {
Code::Flags flags =
- Code::ComputeFlags(Code::CALL_IC, DEBUG_BREAK, NORMAL, argc);
+ Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, DEBUG_BREAK, NORMAL, argc);
Object* probe = ProbeCache(flags);
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
@@ -607,7 +609,11 @@ Object* StubCache::ComputeCallDebugBreak(int argc) {
Object* StubCache::ComputeCallDebugPrepareStepIn(int argc) {
Code::Flags flags =
- Code::ComputeFlags(Code::CALL_IC, DEBUG_PREPARE_STEP_IN, NORMAL, argc);
+ Code::ComputeFlags(Code::CALL_IC,
+ NOT_IN_LOOP,
+ DEBUG_PREPARE_STEP_IN,
+ NORMAL,
+ argc);
Object* probe = ProbeCache(flags);
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
@@ -618,7 +624,7 @@ Object* StubCache::ComputeCallDebugPrepareStepIn(int argc) {
Object* StubCache::ComputeLazyCompile(int argc) {
Code::Flags flags =
- Code::ComputeFlags(Code::STUB, UNINITIALIZED, NORMAL, argc);
+ Code::ComputeFlags(Code::STUB, NOT_IN_LOOP, UNINITIALIZED, NORMAL, argc);
Object* probe = ProbeCache(flags);
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
@@ -713,10 +719,12 @@ Object* LoadInterceptorProperty(Arguments args) {
JSObject* recv = JSObject::cast(args[0]);
JSObject* holder = JSObject::cast(args[1]);
String* name = String::cast(args[2]);
+ Smi* lookup_hint = Smi::cast(args[3]);
ASSERT(holder->HasNamedInterceptor());
PropertyAttributes attr = NONE;
- Object* result = holder->GetPropertyWithInterceptor(recv, name, &attr);
+ Object* result = holder->GetInterceptorPropertyWithLookupHint(
+ recv, lookup_hint, name, &attr);
if (result->IsFailure()) return result;
// If the property is present, return it.
@@ -917,7 +925,10 @@ Object* KeyedStoreStubCompiler::GetCode(PropertyType type, String* name) {
Object* CallStubCompiler::GetCode(PropertyType type, String* name) {
int argc = arguments_.immediate();
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC, type, argc);
+ Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC,
+ type,
+ NOT_IN_LOOP,
+ argc);
return GetCodeWithFlags(flags, name);
}
diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h
index 369b15da9..b79841a36 100644
--- a/deps/v8/src/stub-cache.h
+++ b/deps/v8/src/stub-cache.h
@@ -30,7 +30,8 @@
#include "macro-assembler.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// The stub cache is used for megamorphic calls and property accesses.
@@ -127,18 +128,23 @@ class StubCache : public AllStatic {
// ---
static Object* ComputeCallField(int argc,
+ InLoopFlag in_loop,
String* name,
Object* object,
JSObject* holder,
int index);
static Object* ComputeCallConstant(int argc,
+ InLoopFlag in_loop,
String* name,
Object* object,
JSObject* holder,
JSFunction* function);
- static Object* ComputeCallNormal(int argc, String* name, JSObject* receiver);
+ static Object* ComputeCallNormal(int argc,
+ InLoopFlag in_loop,
+ String* name,
+ JSObject* receiver);
static Object* ComputeCallInterceptor(int argc,
String* name,
@@ -147,15 +153,14 @@ class StubCache : public AllStatic {
// ---
- static Object* ComputeCallInitialize(int argc);
- static Object* ComputeCallInitializeInLoop(int argc);
- static Object* ComputeCallPreMonomorphic(int argc);
- static Object* ComputeCallNormal(int argc);
- static Object* ComputeCallMegamorphic(int argc);
+ static Object* ComputeCallInitialize(int argc, InLoopFlag in_loop);
+ static Object* ComputeCallPreMonomorphic(int argc, InLoopFlag in_loop);
+ static Object* ComputeCallNormal(int argc, InLoopFlag in_loop);
+ static Object* ComputeCallMegamorphic(int argc, InLoopFlag in_loop);
static Object* ComputeCallMiss(int argc);
// Finds the Code object stored in the Heap::non_monomorphic_cache().
- static Code* FindCallInitialize(int argc);
+ static Code* FindCallInitialize(int argc, InLoopFlag in_loop);
#ifdef ENABLE_DEBUGGER_SUPPORT
static Object* ComputeCallDebugBreak(int argc);
@@ -208,8 +213,12 @@ class StubCache : public AllStatic {
// 4Gb (and not at all if it isn't).
uint32_t map_low32bits =
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
+ // The in_loop bit is always cleared when generating the lookup code,
+ // so clear it here as well to make the hash codes match.
+ uint32_t iflags =
+ (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
// Base the offset on a simple combination of name, flags, and map.
- uint32_t key = (map_low32bits + field) ^ flags;
+ uint32_t key = (map_low32bits + field) ^ iflags;
return key & ((kPrimaryTableSize - 1) << kHeapObjectTagSize);
}
@@ -217,7 +226,11 @@ class StubCache : public AllStatic {
// Use the seed from the primary cache in the secondary cache.
uint32_t string_low32bits =
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
- uint32_t key = seed - string_low32bits + flags;
+ // The in_loop bit is always cleared when generating the lookup code,
+ // so clear it here as well to make the hash codes match.
+ uint32_t iflags =
+ (static_cast<uint32_t>(flags) & ~Code::kFlagsICInLoopMask);
+ uint32_t key = seed - string_low32bits + iflags;
return key & ((kSecondaryTableSize - 1) << kHeapObjectTagSize);
}
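
Both probe functions must hash with the in-loop bit cleared, or the in-loop
and not-in-loop variants of the same flags would land in different cache
slots than the generated lookup code probes. A small sketch of masking one
flag bit out of a cache key (the bit position and table size are made up
for the example):

#include <cassert>
#include <cstdint>

static const uint32_t kICInLoopMask = 1u << 7;  // illustrative bit
static const int kTableSize = 1 << 8;

// Primary-style probe: mix map and name bits with the flags, clearing the
// in-loop bit so both variants hash identically.
static int PrimaryOffset(uint32_t map_bits, uint32_t name_bits,
                         uint32_t flags) {
  uint32_t iflags = flags & ~kICInLoopMask;
  uint32_t key = (map_bits + name_bits) ^ iflags;
  return static_cast<int>(key & (kTableSize - 1));
}

int main() {
  uint32_t flags = 0x35u;
  // Same slot whether or not the in-loop bit is set.
  assert(PrimaryOffset(0x1000u, 0x77u, flags) ==
         PrimaryOffset(0x1000u, 0x77u, flags | kICInLoopMask));
  return 0;
}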
@@ -338,6 +351,7 @@ class StubCompiler BASE_EMBEDDED {
static void GenerateLoadInterceptor(MacroAssembler* masm,
JSObject* object,
JSObject* holder,
+ Smi* lookup_hint,
Register receiver,
Register name,
Register scratch1,
@@ -468,11 +482,13 @@ class CallStubCompiler: public StubCompiler {
Object* CompileCallField(Object* object,
JSObject* holder,
int index,
- String* name);
+ String* name,
+ Code::Flags flags);
Object* CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,
- CheckType check);
+ CheckType check,
+ Code::Flags flags);
Object* CompileCallInterceptor(Object* object,
JSObject* holder,
String* name);
diff --git a/deps/v8/src/token.cc b/deps/v8/src/token.cc
index 3f92707d0..bb42cead4 100644
--- a/deps/v8/src/token.cc
+++ b/deps/v8/src/token.cc
@@ -29,7 +29,8 @@
#include "token.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
#ifdef DEBUG
#define T(name, string, precedence) #name,
diff --git a/deps/v8/src/token.h b/deps/v8/src/token.h
index 0f194a3c9..4d4df6345 100644
--- a/deps/v8/src/token.h
+++ b/deps/v8/src/token.h
@@ -28,7 +28,8 @@
#ifndef V8_TOKEN_H_
#define V8_TOKEN_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// TOKEN_LIST takes a list of 3 macros M, all of which satisfy the
// same signature M(name, string, precedence), where name is the
@@ -197,7 +198,7 @@ namespace v8 { namespace internal {
T(ILLEGAL, "ILLEGAL", 0) \
\
/* Scanner-internal use only. */ \
- T(COMMENT, NULL, 0)
+ T(WHITESPACE, NULL, 0)
class Token {
diff --git a/deps/v8/src/top.cc b/deps/v8/src/top.cc
index 96f98a5a2..42a2b7edf 100644
--- a/deps/v8/src/top.cc
+++ b/deps/v8/src/top.cc
@@ -34,7 +34,8 @@
#include "string-stream.h"
#include "platform.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
ThreadLocalTop Top::thread_local_;
Mutex* Top::break_access_ = OS::CreateMutex();
@@ -44,6 +45,7 @@ NoAllocationStringAllocator* preallocated_message_space = NULL;
Address top_addresses[] = {
#define C(name) reinterpret_cast<Address>(Top::name()),
TOP_ADDRESS_LIST(C)
+ TOP_ADDRESS_LIST_PROF(C)
#undef C
NULL
};
@@ -90,6 +92,9 @@ void Top::Iterate(ObjectVisitor* v) {
void Top::InitializeThreadLocal() {
thread_local_.c_entry_fp_ = 0;
thread_local_.handler_ = 0;
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ thread_local_.js_entry_sp_ = 0;
+#endif
thread_local_.stack_is_cooked_ = false;
thread_local_.try_catch_handler_ = NULL;
thread_local_.context_ = NULL;
@@ -881,6 +886,15 @@ Handle<Context> Top::global_context() {
}
+Handle<Context> Top::GetCallingGlobalContext() {
+ JavaScriptFrameIterator it;
+ if (it.done()) return Handle<Context>::null();
+ JavaScriptFrame* frame = it.frame();
+ Context* context = Context::cast(frame->context());
+ return Handle<Context>(context->global_context());
+}
+
+
Object* Top::LookupSpecialFunction(JSObject* receiver,
JSObject* prototype,
JSFunction* function) {
diff --git a/deps/v8/src/top.h b/deps/v8/src/top.h
index 1e5ec5a8a..53d67e555 100644
--- a/deps/v8/src/top.h
+++ b/deps/v8/src/top.h
@@ -30,7 +30,8 @@
#include "frames-inl.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
#define RETURN_IF_SCHEDULED_EXCEPTION() \
@@ -64,6 +65,9 @@ class ThreadLocalTop BASE_EMBEDDED {
// Stack.
Address c_entry_fp_; // the frame pointer of the top c entry frame
Address handler_; // try-blocks are chained through the stack
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ Address js_entry_sp_; // the stack pointer of the bottom js entry frame
+#endif
bool stack_is_cooked_;
inline bool stack_is_cooked() { return stack_is_cooked_; }
inline void set_stack_is_cooked(bool value) { stack_is_cooked_ = value; }
@@ -82,11 +86,20 @@ class ThreadLocalTop BASE_EMBEDDED {
C(pending_exception_address) \
C(external_caught_exception_address)
+#ifdef ENABLE_LOGGING_AND_PROFILING
+#define TOP_ADDRESS_LIST_PROF(C) \
+ C(js_entry_sp_address)
+#else
+#define TOP_ADDRESS_LIST_PROF(C)
+#endif
+
+
class Top {
public:
enum AddressId {
#define C(name) k_##name,
TOP_ADDRESS_LIST(C)
+ TOP_ADDRESS_LIST_PROF(C)
#undef C
k_top_address_count
};
@@ -178,6 +191,16 @@ class Top {
}
static inline Address* handler_address() { return &thread_local_.handler_; }
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // Bottom JS entry (see StackTracer::Trace in log.cc).
+ static Address js_entry_sp(ThreadLocalTop* thread) {
+ return thread->js_entry_sp_;
+ }
+ static inline Address* js_entry_sp_address() {
+ return &thread_local_.js_entry_sp_;
+ }
+#endif
+
// Generated code scratch locations.
static void* formal_count_address() { return &thread_local_.formal_count_; }
@@ -255,8 +278,13 @@ class Top {
return context()->global_proxy();
}
+ // Returns the current global context.
static Handle<Context> global_context();
+ // Returns the global context of the calling JavaScript code. That
+ // is, the global context of the top-most JavaScript frame.
+ static Handle<Context> GetCallingGlobalContext();
+
static Handle<JSBuiltinsObject> builtins() {
return Handle<JSBuiltinsObject>(thread_local_.context_->builtins());
}
diff --git a/deps/v8/src/usage-analyzer.cc b/deps/v8/src/usage-analyzer.cc
index 13176f7a4..36464fa59 100644
--- a/deps/v8/src/usage-analyzer.cc
+++ b/deps/v8/src/usage-analyzer.cc
@@ -31,7 +31,8 @@
#include "scopes.h"
#include "usage-analyzer.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Weight boundaries
static const int MinWeight = 1;
@@ -444,6 +445,7 @@ WeightScaler::~WeightScaler() {
bool AnalyzeVariableUsage(FunctionLiteral* lit) {
if (!FLAG_usage_computation) return true;
+ HistogramTimerScope timer(&Counters::usage_analysis);
return UsageComputer::Traverse(lit);
}
diff --git a/deps/v8/src/usage-analyzer.h b/deps/v8/src/usage-analyzer.h
index 2369422b9..1b0ea4a0f 100644
--- a/deps/v8/src/usage-analyzer.h
+++ b/deps/v8/src/usage-analyzer.h
@@ -28,7 +28,8 @@
#ifndef V8_USAGE_ANALYZER_H_
#define V8_USAGE_ANALYZER_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Compute usage counts for all variables.
// Used for variable allocation.
diff --git a/deps/v8/src/utils.cc b/deps/v8/src/utils.cc
index 392032021..d56d27980 100644
--- a/deps/v8/src/utils.cc
+++ b/deps/v8/src/utils.cc
@@ -33,7 +33,8 @@
#include "sys/stat.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Implementation is from "Hacker's Delight" by Henry S. Warren, Jr.,
@@ -86,6 +87,20 @@ byte* EncodeUnsignedIntBackward(byte* p, unsigned int x) {
}
+// Thomas Wang, Integer Hash Functions.
+// http://www.concentric.net/~Ttwang/tech/inthash.htm
+uint32_t ComputeIntegerHash(uint32_t key) {
+ uint32_t hash = key;
+ hash = ~hash + (hash << 15); // hash = (hash << 15) - hash - 1;
+ hash = hash ^ (hash >> 12);
+ hash = hash + (hash << 2);
+ hash = hash ^ (hash >> 4);
+ hash = hash * 2057; // hash = (hash + (hash << 3)) + (hash << 11);
+ hash = hash ^ (hash >> 16);
+ return hash;
+}
+
+
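+
+The function above is self-contained, so it is easy to experiment with in isolation. A standalone copy that prints the hashes of a few adjacent keys (adjacent inputs should scatter widely, which is the point of the mixing steps):
+
+#include <cstdint>
+#include <cstdio>
+
+uint32_t ComputeIntegerHash(uint32_t key) {
+  uint32_t hash = key;
+  hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1;
+  hash = hash ^ (hash >> 12);
+  hash = hash + (hash << 2);
+  hash = hash ^ (hash >> 4);
+  hash = hash * 2057;           // hash = (hash + (hash << 3)) + (hash << 11);
+  hash = hash ^ (hash >> 16);
+  return hash;
+}
+
+int main() {
+  for (uint32_t k = 0; k < 4; k++) {
+    printf("%u -> 0x%08x\n", k, ComputeIntegerHash(k));
+  }
+  return 0;
+}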
void PrintF(const char* format, ...) {
va_list arguments;
va_start(arguments, format);
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index 0febe4a8d..137e2c4f0 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -30,7 +30,8 @@
#include <stdlib.h>
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// ----------------------------------------------------------------------------
// General helper functions
@@ -42,8 +43,6 @@ static inline bool IsPowerOf2(T x) {
}
-
-
// The C++ standard leaves the semantics of '>>' undefined for
// negative signed operands. Most implementations do the right thing,
// though.
@@ -56,7 +55,7 @@ static inline int ArithmeticShiftRight(int x, int s) {
// This allows conversion of Addresses and integral types into
// 0-relative int offsets.
template <typename T>
-static inline int OffsetFrom(T x) {
+static inline intptr_t OffsetFrom(T x) {
return x - static_cast<T>(0);
}
@@ -65,7 +64,7 @@ static inline int OffsetFrom(T x) {
// This allows conversion of 0-relative int offsets into Addresses and
// integral types.
template <typename T>
-static inline T AddressFrom(int x) {
+static inline T AddressFrom(intptr_t x) {
return static_cast<T>(0) + x;
}
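
The int -> intptr_t widening matters on 64-bit targets: OffsetFrom is applied to Addresses, and the difference between a pointer and address zero no longer fits in 32 bits. A sketch of the round-trip property the widened signatures preserve, mirroring the templates above:

#include <cstdint>
#include <cassert>

typedef unsigned char* Address;

template <typename T>
static inline intptr_t OffsetFrom(T x) {
  return x - static_cast<T>(0);
}

template <typename T>
static inline T AddressFrom(intptr_t x) {
  return static_cast<T>(0) + x;
}

int main() {
  // On a 64-bit target any address survives the round trip, even one
  // above 4GB, where an int-typed offset would truncate the upper bits.
  Address a = reinterpret_cast<Address>(0x123456789ABCull);
  assert(AddressFrom<Address>(OffsetFrom(a)) == a);
  return 0;
}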
@@ -207,6 +206,12 @@ inline byte* DecodeUnsignedIntBackward(byte* p, unsigned int* x) {
// ----------------------------------------------------------------------------
+// Hash function.
+
+uint32_t ComputeIntegerHash(uint32_t key);
+
+
+// ----------------------------------------------------------------------------
// I/O support.
// Our version of printf(). Avoids compilation errors that we get
@@ -374,6 +379,9 @@ class Vector {
// Factory method for creating empty vectors.
static Vector<T> empty() { return Vector<T>(NULL, 0); }
+ protected:
+ void set_start(T* start) { start_ = start; }
+
private:
T* start_;
int length_;
@@ -401,6 +409,22 @@ template <typename T, int kSize>
class EmbeddedVector : public Vector<T> {
public:
EmbeddedVector() : Vector<T>(buffer_, kSize) { }
+
+ // When copying, make the underlying Vector reference our own buffer.
+ EmbeddedVector(const EmbeddedVector& rhs)
+ : Vector<T>(rhs) {
+ memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
+ set_start(buffer_);
+ }
+
+ EmbeddedVector& operator=(const EmbeddedVector& rhs) {
+ if (this == &rhs) return *this;
+ Vector<T>::operator=(rhs);
+ memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
+ set_start(buffer_);
+ return *this;
+ }
+
private:
T buffer_[kSize];
};
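
The copy operations matter because a memberwise copy would leave the inherited start_ pointing into the source object's buffer_. A simplified standalone model of the hazard and the fix, with Vector reduced to the two members that matter:

#include <cstring>

template <typename T>
class Vector {
 public:
  Vector(T* start, int length) : start_(start), length_(length) {}
  T* start() const { return start_; }
 protected:
  void set_start(T* start) { start_ = start; }
 private:
  T* start_;
  int length_;
};

template <typename T, int kSize>
class EmbeddedVector : public Vector<T> {
 public:
  EmbeddedVector() : Vector<T>(buffer_, kSize) {}
  EmbeddedVector(const EmbeddedVector& rhs) : Vector<T>(rhs) {
    memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
    this->set_start(buffer_);  // without this, start() aliases rhs.buffer_
  }
 private:
  T buffer_[kSize];
};

// EmbeddedVector<char, 16> a; EmbeddedVector<char, 16> b(a);
// b.start() now points into b, so b remains valid after a is destroyed.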
diff --git a/deps/v8/src/v8-counters.cc b/deps/v8/src/v8-counters.cc
index 3a8286a64..de2ce6695 100644
--- a/deps/v8/src/v8-counters.cc
+++ b/deps/v8/src/v8-counters.cc
@@ -29,7 +29,8 @@
#include "v8-counters.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
#define HT(name, caption) \
HistogramTimer Counters::name = { #caption, NULL, false, 0, 0 }; \
diff --git a/deps/v8/src/v8-counters.h b/deps/v8/src/v8-counters.h
index 34156ea6f..4111312ea 100644
--- a/deps/v8/src/v8-counters.h
+++ b/deps/v8/src/v8-counters.h
@@ -30,18 +30,30 @@
#include "counters.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
-#define HISTOGRAM_TIMER_LIST(HT) \
- HT(gc_compactor, V8.GCCompactor) /* GC Compactor time */ \
- HT(gc_scavenger, V8.GCScavenger) /* GC Scavenger time */ \
- HT(gc_context, V8.GCContext) /* GC context cleanup time */ \
- HT(compile, V8.Compile) /* Compile time*/ \
- HT(compile_eval, V8.CompileEval) /* Eval compile time */ \
- HT(compile_lazy, V8.CompileLazy) /* Lazy compile time */ \
- HT(parse, V8.Parse) /* Parse time */ \
- HT(parse_lazy, V8.ParseLazy) /* Lazy parse time */ \
- HT(pre_parse, V8.PreParse) /* Pre-parse time */
+#define HISTOGRAM_TIMER_LIST(HT) \
+ /* Garbage collection timers. */ \
+ HT(gc_compactor, V8.GCCompactor) \
+ HT(gc_scavenger, V8.GCScavenger) \
+ HT(gc_context, V8.GCContext) /* GC context cleanup time */ \
+ /* Parsing timers. */ \
+ HT(parse, V8.Parse) \
+ HT(parse_lazy, V8.ParseLazy) \
+ HT(pre_parse, V8.PreParse) \
+ /* Total compilation times. */ \
+ HT(compile, V8.Compile) \
+ HT(compile_eval, V8.CompileEval) \
+ HT(compile_lazy, V8.CompileLazy) \
+ /* Individual compiler passes. */ \
+ HT(rewriting, V8.Rewriting) \
+ HT(usage_analysis, V8.UsageAnalysis) \
+ HT(variable_allocation, V8.VariableAllocation) \
+ HT(ast_optimization, V8.ASTOptimization) \
+ HT(code_generation, V8.CodeGeneration) \
+ HT(deferred_code_generation, V8.DeferredCodeGeneration) \
+ HT(code_creation, V8.CodeCreation)
// WARNING: STATS_COUNTER_LIST_* is a very large macro that is causing MSVC
// Intellisense to crash. It was broken into two macros (each of length 40
@@ -124,7 +136,8 @@ namespace v8 { namespace internal {
SC(enum_cache_misses, V8.EnumCacheMisses) \
SC(reloc_info_count, V8.RelocInfoCount) \
SC(reloc_info_size, V8.RelocInfoSize) \
- SC(zone_segment_bytes, V8.ZoneSegmentBytes)
+ SC(zone_segment_bytes, V8.ZoneSegmentBytes) \
+ SC(compute_entry_frame, V8.ComputeEntryFrame)
// This file contains all the v8 counters that are in use.
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index c0124e4de..17cb2dfe7 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -33,16 +33,23 @@
#include "stub-cache.h"
#include "oprofile-agent.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
+bool V8::is_running_ = false;
bool V8::has_been_setup_ = false;
bool V8::has_been_disposed_ = false;
+bool V8::has_fatal_error_ = false;
bool V8::Initialize(Deserializer *des) {
bool create_heap_objects = des == NULL;
- if (HasBeenDisposed()) return false;
- if (HasBeenSetup()) return true;
+ if (has_been_disposed_ || has_fatal_error_) return false;
+ if (IsRunning()) return true;
+
+ is_running_ = true;
has_been_setup_ = true;
+ has_fatal_error_ = false;
+ has_been_disposed_ = false;
#ifdef DEBUG
// The initialization process does not handle memory exhaustion.
DisallowAllocationFailure disallow_allocation_failure;
@@ -58,7 +65,7 @@ bool V8::Initialize(Deserializer *des) {
// Setup the object heap
ASSERT(!Heap::HasBeenSetup());
if (!Heap::Setup(create_heap_objects)) {
- has_been_setup_ = false;
+ SetFatalError();
return false;
}
@@ -94,9 +101,14 @@ bool V8::Initialize(Deserializer *des) {
}
+void V8::SetFatalError() {
+ is_running_ = false;
+ has_fatal_error_ = true;
+}
+
+
void V8::TearDown() {
- if (HasBeenDisposed()) return;
- if (!HasBeenSetup()) return;
+ if (!has_been_setup_ || has_been_disposed_) return;
OProfileAgent::TearDown();
@@ -113,8 +125,9 @@ void V8::TearDown() {
Heap::TearDown();
Logger::TearDown();
- has_been_setup_ = false;
+ is_running_ = false;
has_been_disposed_ = true;
}
+
} } // namespace v8::internal
diff --git a/deps/v8/src/v8.h b/deps/v8/src/v8.h
index 4ced0d2b4..8cb3c7da1 100644
--- a/deps/v8/src/v8.h
+++ b/deps/v8/src/v8.h
@@ -73,7 +73,8 @@
#include "heap-inl.h"
#include "messages.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
class V8 : public AllStatic {
public:
@@ -85,13 +86,23 @@ class V8 : public AllStatic {
// deserialized data into an empty heap.
static bool Initialize(Deserializer* des);
static void TearDown();
- static bool HasBeenSetup() { return has_been_setup_; }
- static bool HasBeenDisposed() { return has_been_disposed_; }
+ static bool IsRunning() { return is_running_; }
+ // To be dead you have to have lived.
+ static bool IsDead() { return has_fatal_error_ || has_been_disposed_; }
+ static void SetFatalError();
// Report process out of memory. Implementation found in api.cc.
static void FatalProcessOutOfMemory(const char* location);
private:
+ // True if the engine is currently running.
+ static bool is_running_;
+ // True if V8 has ever been run.
static bool has_been_setup_;
+ // True if an error has been signaled for the current engine
+ // (reset to false if the engine is restarted).
+ static bool has_fatal_error_;
+ // True if the engine has been shut down
+ // (reset if the engine is restarted).
static bool has_been_disposed_;
};
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index 55bc9f8ff..fe463513d 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -115,12 +115,16 @@ function GlobalParseFloat(string) {
function GlobalEval(x) {
if (!IS_STRING(x)) return x;
- if (this !== global && this !== %GlobalReceiver(global)) {
- throw new $EvalError('The "this" object passed to eval must ' +
+ var global_receiver = %GlobalReceiver(global);
+ var this_is_global_receiver = (this === global_receiver);
+ var global_is_detached = (global === global_receiver);
+
+ if (!this_is_global_receiver || global_is_detached) {
+ throw new $EvalError('The "this" object passed to eval must ' +
'be the global object from which eval originated');
}
-
- var f = %CompileString(x, 0, false);
+
+ var f = %CompileString(x, false);
if (!IS_FUNCTION(f)) return f;
return f.call(this);
@@ -131,7 +135,7 @@ function GlobalEval(x) {
function GlobalExecScript(expr, lang) {
// NOTE: We don't care about the character casing.
if (!lang || /javascript/i.test(lang)) {
- var f = %CompileString(ToString(expr), 0, false);
+ var f = %CompileString(ToString(expr), false);
f.call(%GlobalReceiver(global));
}
return null;
@@ -550,7 +554,7 @@ function NewFunction(arg1) { // length == 1
// The call to SetNewFunctionAttributes will ensure the prototype
// property of the resulting function is enumerable (ECMA262, 15.3.5.2).
- var f = %CompileString(source, -1, false)();
+ var f = %CompileString(source, false)();
%FunctionSetName(f, "anonymous");
return %SetNewFunctionAttributes(f);
}
diff --git a/deps/v8/src/v8threads.cc b/deps/v8/src/v8threads.cc
index 838cae772..c5fc9fa7e 100644
--- a/deps/v8/src/v8threads.cc
+++ b/deps/v8/src/v8threads.cc
@@ -261,6 +261,8 @@ void ThreadManager::EagerlyArchiveThread() {
ThreadState* state = lazily_archived_thread_state_;
state->LinkInto(ThreadState::IN_USE_LIST);
char* to = state->data();
+ // Ensure that data containing GC roots is archived first, and handle it
+ // in ThreadManager::Iterate(ObjectVisitor*).
to = HandleScopeImplementer::ArchiveThread(to);
to = Top::ArchiveThread(to);
#ifdef ENABLE_DEBUGGER_SUPPORT
diff --git a/deps/v8/src/v8threads.h b/deps/v8/src/v8threads.h
index b651fc344..83f69f060 100644
--- a/deps/v8/src/v8threads.h
+++ b/deps/v8/src/v8threads.h
@@ -28,7 +28,8 @@
#ifndef V8_V8THREADS_H_
#define V8_V8THREADS_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
class ThreadState {
diff --git a/deps/v8/src/variables.cc b/deps/v8/src/variables.cc
index 51eb8caf8..6c9f82f08 100644
--- a/deps/v8/src/variables.cc
+++ b/deps/v8/src/variables.cc
@@ -31,7 +31,8 @@
#include "scopes.h"
#include "variables.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// ----------------------------------------------------------------------------
// Implementation UseCount.
diff --git a/deps/v8/src/variables.h b/deps/v8/src/variables.h
index 275f498e8..50620718c 100644
--- a/deps/v8/src/variables.h
+++ b/deps/v8/src/variables.h
@@ -30,7 +30,8 @@
#include "zone.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
class UseCount BASE_EMBEDDED {
public:
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index 0d2840d44..d613e940d 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 1
#define MINOR_VERSION 2
-#define BUILD_NUMBER 3
+#define BUILD_NUMBER 7
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
@@ -43,7 +43,8 @@
// number. This define is mainly used by the SCons build script.
#define SONAME ""
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
int Version::major_ = MAJOR_VERSION;
int Version::minor_ = MINOR_VERSION;
diff --git a/deps/v8/src/version.h b/deps/v8/src/version.h
index 423b5f7a1..c322a2fc0 100644
--- a/deps/v8/src/version.h
+++ b/deps/v8/src/version.h
@@ -28,7 +28,8 @@
#ifndef V8_VERSION_H_
#define V8_VERSION_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
class Version {
public:
diff --git a/deps/v8/src/virtual-frame.cc b/deps/v8/src/virtual-frame.cc
index 566fcdbc0..39dbf1735 100644
--- a/deps/v8/src/virtual-frame.cc
+++ b/deps/v8/src/virtual-frame.cc
@@ -30,47 +30,27 @@
#include "codegen-inl.h"
#include "register-allocator-inl.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// -------------------------------------------------------------------------
// VirtualFrame implementation.
-VirtualFrame::SpilledScope::SpilledScope(CodeGenerator* cgen)
- : cgen_(cgen),
- previous_state_(cgen->in_spilled_code()) {
- ASSERT(cgen->has_valid_frame());
- cgen->frame()->SpillAll();
- cgen->set_in_spilled_code(true);
-}
-
-
-VirtualFrame::SpilledScope::~SpilledScope() {
- cgen_->set_in_spilled_code(previous_state_);
-}
-
-
// When cloned, a frame is a deep copy of the original.
VirtualFrame::VirtualFrame(VirtualFrame* original)
- : cgen_(original->cgen_),
- masm_(original->masm_),
- elements_(original->elements_.capacity()),
- parameter_count_(original->parameter_count_),
- local_count_(original->local_count_),
- stack_pointer_(original->stack_pointer_),
- frame_pointer_(original->frame_pointer_) {
- // Copy all the elements from the original.
- for (int i = 0; i < original->elements_.length(); i++) {
- elements_.Add(original->elements_[i]);
- }
- for (int i = 0; i < kNumRegisters; i++) {
- register_locations_[i] = original->register_locations_[i];
- }
+ : elements_(original->element_count()),
+ stack_pointer_(original->stack_pointer_) {
+ elements_.AddAll(original->elements_);
+ // Copy register locations from original.
+ memcpy(&register_locations_,
+ original->register_locations_,
+ sizeof(register_locations_));
}
FrameElement VirtualFrame::CopyElementAt(int index) {
ASSERT(index >= 0);
- ASSERT(index < elements_.length());
+ ASSERT(index < element_count());
FrameElement target = elements_[index];
FrameElement result;
@@ -94,10 +74,10 @@ FrameElement VirtualFrame::CopyElementAt(int index) {
case FrameElement::REGISTER:
// All copies are backed by memory or register locations.
result.set_static_type(target.static_type());
- result.type_ = FrameElement::COPY;
- result.copied_ = false;
- result.synced_ = false;
- result.data_.index_ = index;
+ result.set_type(FrameElement::COPY);
+ result.clear_copied();
+ result.clear_sync();
+ result.set_index(index);
elements_[index].set_copied();
break;
@@ -116,7 +96,7 @@ FrameElement VirtualFrame::CopyElementAt(int index) {
// pushing an exception handler). No code is emitted.
void VirtualFrame::Adjust(int count) {
ASSERT(count >= 0);
- ASSERT(stack_pointer_ == elements_.length() - 1);
+ ASSERT(stack_pointer_ == element_count() - 1);
for (int i = 0; i < count; i++) {
elements_.Add(FrameElement::MemoryElement());
@@ -125,22 +105,9 @@ void VirtualFrame::Adjust(int count) {
}
-// Modify the state of the virtual frame to match the actual frame by
-// removing elements from the top of the virtual frame. The elements will
-// be externally popped from the actual frame (eg, by a runtime call). No
-// code is emitted.
-void VirtualFrame::Forget(int count) {
- ASSERT(count >= 0);
- ASSERT(stack_pointer_ == elements_.length() - 1);
-
- stack_pointer_ -= count;
- ForgetElements(count);
-}
-
-
void VirtualFrame::ForgetElements(int count) {
ASSERT(count >= 0);
- ASSERT(elements_.length() >= count);
+ ASSERT(element_count() >= count);
for (int i = 0; i < count; i++) {
FrameElement last = elements_.RemoveLast();
@@ -148,47 +115,25 @@ void VirtualFrame::ForgetElements(int count) {
// A hack to properly count register references for the code
// generator's current frame and also for other frames. The
// same code appears in PrepareMergeTo.
- if (cgen_->frame() == this) {
+ if (cgen()->frame() == this) {
Unuse(last.reg());
} else {
- register_locations_[last.reg().code()] = kIllegalIndex;
+ set_register_location(last.reg(), kIllegalIndex);
}
}
}
}
-void VirtualFrame::Use(Register reg, int index) {
- ASSERT(register_locations_[reg.code()] == kIllegalIndex);
- register_locations_[reg.code()] = index;
- cgen_->allocator()->Use(reg);
-}
-
-
-void VirtualFrame::Unuse(Register reg) {
- ASSERT(register_locations_[reg.code()] != kIllegalIndex);
- register_locations_[reg.code()] = kIllegalIndex;
- cgen_->allocator()->Unuse(reg);
-}
-
-
-void VirtualFrame::Spill(Register target) {
- if (is_used(target)) {
- SpillElementAt(register_index(target));
- }
-}
-
-
// If there are any registers referenced only by the frame, spill one.
Register VirtualFrame::SpillAnyRegister() {
- // Find the leftmost (ordered by register code) register whose only
+ // Find the leftmost (ordered by register number) register whose only
// reference is in the frame.
- for (int i = 0; i < kNumRegisters; i++) {
- if (is_used(i) && cgen_->allocator()->count(i) == 1) {
- Register result = { i };
- Spill(result);
- ASSERT(!cgen_->allocator()->is_used(result));
- return result;
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ if (is_used(i) && cgen()->allocator()->count(i) == 1) {
+ SpillElementAt(register_location(i));
+ ASSERT(!cgen()->allocator()->is_used(i));
+ return RegisterAllocator::ToRegister(i);
}
}
return no_reg;
@@ -227,7 +172,7 @@ void VirtualFrame::SyncElementAt(int index) {
// Make the type of all elements be MEMORY.
void VirtualFrame::SpillAll() {
- for (int i = 0; i < elements_.length(); i++) {
+ for (int i = 0; i < element_count(); i++) {
SpillElementAt(i);
}
}
@@ -237,7 +182,7 @@ void VirtualFrame::PrepareMergeTo(VirtualFrame* expected) {
// Perform state changes on this frame that will make merge to the
// expected frame simpler or else increase the likelihood that this
// frame will match another.
- for (int i = 0; i < elements_.length(); i++) {
+ for (int i = 0; i < element_count(); i++) {
FrameElement source = elements_[i];
FrameElement target = expected->elements_[i];
@@ -251,10 +196,10 @@ void VirtualFrame::PrepareMergeTo(VirtualFrame* expected) {
// If the frame is the code generator's current frame, we have
// to decrement both the frame-internal and global register
// counts.
- if (cgen_->frame() == this) {
+ if (cgen()->frame() == this) {
Unuse(source.reg());
} else {
- register_locations_[source.reg().code()] = kIllegalIndex;
+ set_register_location(source.reg(), kIllegalIndex);
}
}
elements_[i] = target;
@@ -266,12 +211,6 @@ void VirtualFrame::PrepareMergeTo(VirtualFrame* expected) {
ASSERT(source.is_valid());
elements_[i].clear_sync();
}
-
- elements_[i].clear_copied();
- if (elements_[i].is_copy()) {
- elements_[elements_[i].index()].set_copied();
- }
-
// No code needs to be generated to change the static type of an
// element.
elements_[i].set_static_type(target.static_type());
@@ -284,16 +223,16 @@ void VirtualFrame::PrepareForCall(int spilled_args, int dropped_args) {
ASSERT(height() >= spilled_args);
ASSERT(dropped_args <= spilled_args);
- SyncRange(0, elements_.length() - 1);
+ SyncRange(0, element_count() - 1);
// Spill registers.
- for (int i = 0; i < kNumRegisters; i++) {
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
if (is_used(i)) {
- SpillElementAt(register_locations_[i]);
+ SpillElementAt(register_location(i));
}
}
// Spill the arguments.
- for (int i = elements_.length() - spilled_args; i < elements_.length(); i++) {
+ for (int i = element_count() - spilled_args; i < element_count(); i++) {
if (!elements_[i].is_memory()) {
SpillElementAt(i);
}
@@ -304,55 +243,32 @@ void VirtualFrame::PrepareForCall(int spilled_args, int dropped_args) {
}
-void VirtualFrame::DetachFromCodeGenerator() {
- // Tell the global register allocator that it is free to reallocate all
- // register references contained in this frame. The frame elements remain
- // register references, so the frame-internal reference count is not
- // decremented.
- for (int i = 0; i < elements_.length(); i++) {
- if (elements_[i].is_register()) {
- cgen_->allocator()->Unuse(elements_[i].reg());
- }
- }
-}
-
-
-void VirtualFrame::AttachToCodeGenerator() {
- // Tell the global register allocator that the frame-internal register
- // references are live again.
- for (int i = 0; i < elements_.length(); i++) {
- if (elements_[i].is_register()) {
- cgen_->allocator()->Use(elements_[i].reg());
- }
- }
-}
-
-
void VirtualFrame::PrepareForReturn() {
// Spill all locals. This is necessary to make sure all locals have
// the right value when breaking at the return site in the debugger.
- //
- // TODO(203): It is also necessary to ensure that merging at the
- // return site does not generate code to overwrite eax, where the
- // return value is kept in a non-refcounted register reference.
- for (int i = 0; i < expression_base_index(); i++) SpillElementAt(i);
+ // Set their static type to unknown so that they will match the known
+ // return frame.
+ for (int i = 0; i < expression_base_index(); i++) {
+ SpillElementAt(i);
+ elements_[i].set_static_type(StaticType::unknown());
+ }
}
void VirtualFrame::SetElementAt(int index, Result* value) {
- int frame_index = elements_.length() - index - 1;
+ int frame_index = element_count() - index - 1;
ASSERT(frame_index >= 0);
- ASSERT(frame_index < elements_.length());
+ ASSERT(frame_index < element_count());
ASSERT(value->is_valid());
FrameElement original = elements_[frame_index];
// Early exit if the element is the same as the one being set.
bool same_register = original.is_register()
- && value->is_register()
- && original.reg().is(value->reg());
+ && value->is_register()
+ && original.reg().is(value->reg());
bool same_constant = original.is_constant()
- && value->is_constant()
- && original.handle().is_identical_to(value->handle());
+ && value->is_constant()
+ && original.handle().is_identical_to(value->handle());
if (same_register || same_constant) {
value->Unuse();
return;
@@ -366,7 +282,7 @@ void VirtualFrame::SetElementAt(int index, Result* value) {
// The register already appears on the frame. Either the existing
// register element, or the new element at frame_index, must be made
// a copy.
- int i = register_index(value->reg());
+ int i = register_location(value->reg());
ASSERT(value->static_type() == elements_[i].static_type());
if (i < frame_index) {
@@ -382,8 +298,8 @@ void VirtualFrame::SetElementAt(int index, Result* value) {
elements_[i].set_sync();
}
elements_[frame_index].clear_sync();
- register_locations_[value->reg().code()] = frame_index;
- for (int j = i + 1; j < elements_.length(); j++) {
+ set_register_location(value->reg(), frame_index);
+ for (int j = i + 1; j < element_count(); j++) {
if (elements_[j].is_copy() && elements_[j].index() == i) {
elements_[j].set_index(frame_index);
}
@@ -408,25 +324,18 @@ void VirtualFrame::SetElementAt(int index, Result* value) {
void VirtualFrame::PushFrameSlotAt(int index) {
- FrameElement new_element = CopyElementAt(index);
- elements_.Add(new_element);
-}
-
-
-Result VirtualFrame::CallStub(CodeStub* stub, int arg_count) {
- PrepareForCall(arg_count, arg_count);
- return RawCallStub(stub);
+ elements_.Add(CopyElementAt(index));
}
void VirtualFrame::Push(Register reg, StaticType static_type) {
if (is_used(reg)) {
- int index = register_index(reg);
+ int index = register_location(reg);
FrameElement element = CopyElementAt(index);
ASSERT(static_type.merge(element.static_type()) == element.static_type());
elements_.Add(element);
} else {
- Use(reg, elements_.length());
+ Use(reg, element_count());
FrameElement element =
FrameElement::RegisterElement(reg,
FrameElement::NOT_SYNCED,
@@ -443,17 +352,6 @@ void VirtualFrame::Push(Handle<Object> value) {
}
-void VirtualFrame::Push(Result* result) {
- if (result->is_register()) {
- Push(result->reg(), result->static_type());
- } else {
- ASSERT(result->is_constant());
- Push(result->handle());
- }
- result->Unuse();
-}
-
-
void VirtualFrame::Nip(int num_dropped) {
ASSERT(num_dropped >= 0);
if (num_dropped == 0) return;
@@ -465,42 +363,17 @@ void VirtualFrame::Nip(int num_dropped) {
}
-bool FrameElement::Equals(FrameElement other) {
- if (type_ != other.type_ ||
- copied_ != other.copied_ ||
- synced_ != other.synced_) return false;
-
- if (is_register()) {
- if (!reg().is(other.reg())) return false;
- } else if (is_constant()) {
- if (!handle().is_identical_to(other.handle())) return false;
- } else if (is_copy()) {
- if (index() != other.index()) return false;
- }
-
- return true;
-}
-
-
bool VirtualFrame::Equals(VirtualFrame* other) {
#ifdef DEBUG
- // These are sanity checks in debug builds, but we do not need to
- // use them to distinguish frames at merge points.
- if (cgen_ != other->cgen_) return false;
- if (masm_ != other->masm_) return false;
- if (parameter_count_ != other->parameter_count_) return false;
- if (local_count_ != other->local_count_) return false;
- if (frame_pointer_ != other->frame_pointer_) return false;
-
- for (int i = 0; i < kNumRegisters; i++) {
- if (register_locations_[i] != other->register_locations_[i]) {
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ if (register_location(i) != other->register_location(i)) {
return false;
}
}
- if (elements_.length() != other->elements_.length()) return false;
+ if (element_count() != other->element_count()) return false;
#endif
if (stack_pointer_ != other->stack_pointer_) return false;
- for (int i = 0; i < elements_.length(); i++) {
+ for (int i = 0; i < element_count(); i++) {
if (!elements_[i].Equals(other->elements_[i])) return false;
}
diff --git a/deps/v8/src/virtual-frame.h b/deps/v8/src/virtual-frame.h
index 794f1567c..293f9e534 100644
--- a/deps/v8/src/virtual-frame.h
+++ b/deps/v8/src/virtual-frame.h
@@ -28,180 +28,9 @@
#ifndef V8_VIRTUAL_FRAME_H_
#define V8_VIRTUAL_FRAME_H_
+#include "frame-element.h"
#include "macro-assembler.h"
-namespace v8 { namespace internal {
-
-// -------------------------------------------------------------------------
-// Virtual frame elements
-//
-// The internal elements of the virtual frames. There are several kinds of
-// elements:
-// * Invalid: elements that are uninitialized or not actually part
-// of the virtual frame. They should not be read.
-// * Memory: an element that resides in the actual frame. Its address is
-// given by its position in the virtual frame.
-// * Register: an element that resides in a register.
-// * Constant: an element whose value is known at compile time.
-
-class FrameElement BASE_EMBEDDED {
- public:
- enum SyncFlag {
- NOT_SYNCED,
- SYNCED
- };
-
- // The default constructor creates an invalid frame element.
- FrameElement()
- : static_type_(), type_(INVALID), copied_(false), synced_(false) {
- data_.reg_ = no_reg;
- }
-
- // Factory function to construct an invalid frame element.
- static FrameElement InvalidElement() {
- FrameElement result;
- return result;
- }
-
- // Factory function to construct an in-memory frame element.
- static FrameElement MemoryElement() {
- FrameElement result(MEMORY, no_reg, SYNCED);
- return result;
- }
-
- // Factory function to construct an in-register frame element.
- static FrameElement RegisterElement(Register reg,
- SyncFlag is_synced,
- StaticType static_type = StaticType()) {
- return FrameElement(REGISTER, reg, is_synced, static_type);
- }
-
- // Factory function to construct a frame element whose value is known at
- // compile time.
- static FrameElement ConstantElement(Handle<Object> value,
- SyncFlag is_synced) {
- FrameElement result(value, is_synced);
- return result;
- }
-
- bool is_synced() const { return synced_; }
-
- void set_sync() {
- ASSERT(type() != MEMORY);
- synced_ = true;
- }
-
- void clear_sync() {
- ASSERT(type() != MEMORY);
- synced_ = false;
- }
-
- bool is_valid() const { return type() != INVALID; }
- bool is_memory() const { return type() == MEMORY; }
- bool is_register() const { return type() == REGISTER; }
- bool is_constant() const { return type() == CONSTANT; }
- bool is_copy() const { return type() == COPY; }
-
- bool is_copied() const { return copied_; }
- void set_copied() { copied_ = true; }
- void clear_copied() { copied_ = false; }
-
- Register reg() const {
- ASSERT(is_register());
- return data_.reg_;
- }
-
- Handle<Object> handle() const {
- ASSERT(is_constant());
- return Handle<Object>(data_.handle_);
- }
-
- int index() const {
- ASSERT(is_copy());
- return data_.index_;
- }
-
- bool Equals(FrameElement other);
-
- StaticType static_type() { return static_type_; }
-
- void set_static_type(StaticType static_type) {
- // TODO(lrn): If it's a copy, it would be better to update the real one,
- // but we can't from here. The caller must handle this.
- static_type_ = static_type;
- }
-
- private:
- enum Type {
- INVALID,
- MEMORY,
- REGISTER,
- CONSTANT,
- COPY
- };
-
- Type type() const { return static_cast<Type>(type_); }
-
- StaticType static_type_;
-
- // The element's type.
- byte type_;
-
- bool copied_;
-
- // The element's dirty-bit. The dirty bit can be cleared
- // for non-memory elements to indicate that the element agrees with
- // the value in memory in the actual frame.
- bool synced_;
-
- union {
- Register reg_;
- Object** handle_;
- int index_;
- } data_;
-
- // Used to construct memory and register elements.
- FrameElement(Type type, Register reg, SyncFlag is_synced)
- : static_type_(),
- type_(type),
- copied_(false),
- synced_(is_synced != NOT_SYNCED) {
- data_.reg_ = reg;
- }
-
- FrameElement(Type type, Register reg, SyncFlag is_synced, StaticType stype)
- : static_type_(stype),
- type_(type),
- copied_(false),
- synced_(is_synced != NOT_SYNCED) {
- data_.reg_ = reg;
- }
-
- // Used to construct constant elements.
- FrameElement(Handle<Object> value, SyncFlag is_synced)
- : static_type_(StaticType::TypeOf(*value)),
- type_(CONSTANT),
- copied_(false),
- synced_(is_synced != NOT_SYNCED) {
- data_.handle_ = value.location();
- }
-
- void set_index(int new_index) {
- ASSERT(is_copy());
- data_.index_ = new_index;
- }
-
- void set_reg(Register new_reg) {
- ASSERT(is_register());
- data_.reg_ = new_reg;
- }
-
- friend class VirtualFrame;
-};
-
-
-} } // namespace v8::internal
-
#if V8_TARGET_ARCH_IA32
#include "ia32/virtual-frame-ia32.h"
#elif V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 0b018490d..18225681e 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -28,12 +28,141 @@
#ifndef V8_X64_ASSEMBLER_X64_INL_H_
#define V8_X64_ASSEMBLER_X64_INL_H_
-namespace v8 { namespace internal {
+#include "cpu.h"
+
+namespace v8 {
+namespace internal {
Condition NegateCondition(Condition cc) {
return static_cast<Condition>(cc ^ 1);
}
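
NegateCondition relies on the x64 condition codes coming in complementary even/odd pairs (equal/not_equal are 4/5, less/greater_equal are 12/13, and so on), so flipping the low bit negates the test:

enum Condition {
  equal = 4,
  not_equal = 5,
  less = 12,
  greater_equal = 13
};

inline Condition Negate(Condition cc) {
  return static_cast<Condition>(cc ^ 1);
}
// Negate(equal) == not_equal, Negate(less) == greater_equal.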
+// -----------------------------------------------------------------------------
+
+Immediate::Immediate(Smi* value) {
+ value_ = static_cast<int32_t>(reinterpret_cast<intptr_t>(value));
+}
+
+// -----------------------------------------------------------------------------
+// Implementation of Assembler
+
+
+
+void Assembler::emitl(uint32_t x) {
+ Memory::uint32_at(pc_) = x;
+ pc_ += sizeof(uint32_t);
+}
+
+
+void Assembler::emitq(uint64_t x, RelocInfo::Mode rmode) {
+ Memory::uint64_at(pc_) = x;
+ if (rmode != RelocInfo::NONE) {
+ RecordRelocInfo(rmode, x);
+ }
+ pc_ += sizeof(uint64_t);
+}
+
+
+void Assembler::emitw(uint16_t x) {
+ Memory::uint16_at(pc_) = x;
+ pc_ += sizeof(uint16_t);
+}
+
+
+void Assembler::emit_rex_64(Register reg, Register rm_reg) {
+ emit(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
+}
+
+
+void Assembler::emit_rex_64(Register reg, const Operand& op) {
+ emit(0x48 | (reg.code() & 0x8) >> 1 | op.rex_);
+}
+
+
+void Assembler::emit_rex_64(Register rm_reg) {
+ ASSERT_EQ(rm_reg.code() & 0xf, rm_reg.code());
+ emit(0x48 | (rm_reg.code() >> 3));
+}
+
+
+void Assembler::emit_rex_64(const Operand& op) {
+ emit(0x48 | op.rex_);
+}
+
+
+void Assembler::emit_rex_32(Register reg, Register rm_reg) {
+ emit(0x40 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
+}
+
+
+void Assembler::emit_rex_32(Register reg, const Operand& op) {
+ emit(0x40 | (reg.code() & 0x8) >> 1 | op.rex_);
+}
+
+
+void Assembler::emit_rex_32(Register rm_reg) {
+ emit(0x40 | (rm_reg.code() & 0x8) >> 3);
+}
+
+
+void Assembler::emit_rex_32(const Operand& op) {
+ emit(0x40 | op.rex_);
+}
+
+
+void Assembler::emit_optional_rex_32(Register reg, Register rm_reg) {
+ byte rex_bits = (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3;
+ if (rex_bits != 0) emit(0x40 | rex_bits);
+}
+
+
+void Assembler::emit_optional_rex_32(Register reg, const Operand& op) {
+ byte rex_bits = (reg.code() & 0x8) >> 1 | op.rex_;
+ if (rex_bits != 0) emit(0x40 | rex_bits);
+}
+
+
+void Assembler::emit_optional_rex_32(Register rm_reg) {
+ if ((rm_reg.code() & 0x8) != 0) emit(0x41);
+}
+
+
+void Assembler::emit_optional_rex_32(const Operand& op) {
+ if (op.rex_ != 0) emit(0x40 | op.rex_);
+}
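+
+These helpers pack bit 3 of each register number into the REX prefix, whose layout is 0100WRXB: W selects 64-bit operand size, and R, X, B extend the ModR/M reg field, the SIB index, and the ModR/M rm (or SIB base) field respectively. A self-checking sketch of the bit packing used above:
+
+#include <cstdint>
+#include <cassert>
+
+uint8_t RexFor(int reg_code, int rm_code, bool wide) {
+  uint8_t rex = wide ? 0x48 : 0x40;  // 0100W000
+  rex |= (reg_code & 0x8) >> 1;      // R <- bit 3 of the reg operand
+  rex |= (rm_code & 0x8) >> 3;       // B <- bit 3 of the rm operand
+  return rex;
+}
+
+int main() {
+  assert(RexFor(0, 0, true) == 0x48);  // e.g. movq rax, rax
+  assert(RexFor(8, 0, true) == 0x4C);  // r8 as reg sets REX.R
+  assert(RexFor(0, 8, true) == 0x49);  // r8 as rm sets REX.B
+  return 0;
+}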
+
+
+Address Assembler::target_address_at(Address pc) {
+ return Memory::Address_at(pc);
+}
+
+
+void Assembler::set_target_address_at(Address pc, Address target) {
+ Memory::Address_at(pc) = target;
+ CPU::FlushICache(pc, sizeof(intptr_t));
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+// The modes possibly affected by apply must be in kApplyMask.
+void RelocInfo::apply(int delta) {
+ if (rmode_ == RUNTIME_ENTRY || IsCodeTarget(rmode_)) {
+ intptr_t* p = reinterpret_cast<intptr_t*>(pc_);
+ *p -= delta; // relocate entry
+ } else if (rmode_ == JS_RETURN && IsCallInstruction()) {
+ // Special handling of js_return when a break point is set (call
+ // instruction has been inserted).
+ intptr_t* p = reinterpret_cast<intptr_t*>(pc_ + 1);
+ *p -= delta; // relocate entry
+ } else if (IsInternalReference(rmode_)) {
+ // absolute code pointer inside code object moves with the code object.
+ intptr_t* p = reinterpret_cast<intptr_t*>(pc_);
+ *p += delta; // relocate entry
+ }
+}
+
Address RelocInfo::target_address() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
@@ -53,16 +182,125 @@ void RelocInfo::set_target_address(Address target) {
}
-void Assembler::set_target_address_at(byte* location, byte* value) {
- UNIMPLEMENTED();
+Object* RelocInfo::target_object() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return *reinterpret_cast<Object**>(pc_);
}
-byte* Assembler::target_address_at(byte* location) {
- UNIMPLEMENTED();
- return NULL;
+Object** RelocInfo::target_object_address() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return reinterpret_cast<Object**>(pc_);
}
+
+Address* RelocInfo::target_reference_address() {
+ ASSERT(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
+ return reinterpret_cast<Address*>(pc_);
+}
+
+
+void RelocInfo::set_target_object(Object* target) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ *reinterpret_cast<Object**>(pc_) = target;
+}
+
+
+bool RelocInfo::IsCallInstruction() {
+ UNIMPLEMENTED(); // IA32 code below.
+ return *pc_ == 0xE8;
+}
+
+
+Address RelocInfo::call_address() {
+ UNIMPLEMENTED(); // IA32 code below.
+ ASSERT(IsCallInstruction());
+ return Assembler::target_address_at(pc_ + 1);
+}
+
+
+void RelocInfo::set_call_address(Address target) {
+ UNIMPLEMENTED(); // IA32 code below.
+ ASSERT(IsCallInstruction());
+ Assembler::set_target_address_at(pc_ + 1, target);
+}
+
+
+Object* RelocInfo::call_object() {
+ UNIMPLEMENTED(); // IA32 code below.
+ ASSERT(IsCallInstruction());
+ return *call_object_address();
+}
+
+
+void RelocInfo::set_call_object(Object* target) {
+ UNIMPLEMENTED(); // IA32 code below.
+ ASSERT(IsCallInstruction());
+ *call_object_address() = target;
+}
+
+
+Object** RelocInfo::call_object_address() {
+ UNIMPLEMENTED(); // IA32 code below.
+ ASSERT(IsCallInstruction());
+ return reinterpret_cast<Object**>(pc_ + 1);
+}
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand
+
+Operand::Operand(Register base, int32_t disp) {
+ len_ = 1;
+ if (base.is(rsp) || base.is(r12)) {
+ // SIB byte is needed to encode (rsp + offset) or (r12 + offset).
+ set_sib(kTimes1, rsp, base);
+ }
+
+ if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
+ set_modrm(0, base);
+ } else if (is_int8(disp)) {
+ set_modrm(1, base);
+ set_disp8(disp);
+ } else {
+ set_modrm(2, base);
+ set_disp32(disp);
+ }
+}
+
+void Operand::set_modrm(int mod, Register rm) {
+ ASSERT((mod & -4) == 0);
+ buf_[0] = mod << 6 | (rm.code() & 0x7);
+ // Set REX.B to the high bit of rm.code().
+ rex_ |= (rm.code() >> 3);
+}
+
+
+void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
+ ASSERT(len_ == 1);
+ ASSERT(is_uint2(scale));
+ // Use SIB with no index register only for base rsp or r12.
+ ASSERT(!index.is(rsp) || base.is(rsp) || base.is(r12));
+ buf_[1] = scale << 6 | (index.code() & 0x7) << 3 | (base.code() & 0x7);
+ rex_ |= (index.code() >> 3) << 1 | base.code() >> 3;
+ len_ = 2;
+}
+
+void Operand::set_disp8(int disp) {
+ ASSERT(is_int8(disp));
+ ASSERT(len_ == 1 || len_ == 2);
+ int8_t* p = reinterpret_cast<int8_t*>(&buf_[len_]);
+ *p = disp;
+ len_ += sizeof(int8_t);
+}
+
+void Operand::set_disp32(int disp) {
+ ASSERT(len_ == 1 || len_ == 2);
+ int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
+ *p = disp;
+ len_ += sizeof(int32_t);
+}
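+
+Operand(Register base, int32_t disp) is choosing among the three ModR/M addressing modes: mod=00 (no displacement), mod=01 (disp8), and mod=10 (disp32). rbp and r13 cannot use mod=00 with a plain base (that encoding means RIP-relative or absolute addressing), and rsp/r12 always force the SIB byte. A sketch of just the mod-field decision:
+
+#include <cstdint>
+#include <cstdio>
+
+int ModFor(int base_code, int32_t disp) {
+  bool base_is_rbp_or_r13 = (base_code & 7) == 5;  // rbp=5, r13=13
+  if (disp == 0 && !base_is_rbp_or_r13) return 0;  // no displacement
+  if (disp >= -128 && disp <= 127) return 1;       // disp8
+  return 2;                                        // disp32
+}
+
+int main() {
+  printf("[rax]        -> mod=%d\n", ModFor(0, 0));       // 0
+  printf("[rbp]        -> mod=%d\n", ModFor(5, 0));       // 1, forced disp8
+  printf("[rax+16]     -> mod=%d\n", ModFor(0, 16));      // 1
+  printf("[rax+0x1000] -> mod=%d\n", ModFor(0, 0x1000));  // 2
+  return 0;
+}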
+
+
} } // namespace v8::internal
#endif // V8_X64_ASSEMBLER_X64_INL_H_
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 6e2c42a12..77bbf5240 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -28,9 +28,1520 @@
#include "v8.h"
#include "macro-assembler.h"
+#include "serialize.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Implementation of Register
+
+Register rax = { 0 };
+Register rcx = { 1 };
+Register rdx = { 2 };
+Register rbx = { 3 };
+Register rsp = { 4 };
+Register rbp = { 5 };
+Register rsi = { 6 };
+Register rdi = { 7 };
+Register r8 = { 8 };
+Register r9 = { 9 };
+Register r10 = { 10 };
+Register r11 = { 11 };
+Register r12 = { 12 };
+Register r13 = { 13 };
+Register r14 = { 14 };
+Register r15 = { 15 };
Register no_reg = { -1 };
+XMMRegister xmm0 = { 0 };
+XMMRegister xmm1 = { 1 };
+XMMRegister xmm2 = { 2 };
+XMMRegister xmm3 = { 3 };
+XMMRegister xmm4 = { 4 };
+XMMRegister xmm5 = { 5 };
+XMMRegister xmm6 = { 6 };
+XMMRegister xmm7 = { 7 };
+XMMRegister xmm8 = { 8 };
+XMMRegister xmm9 = { 9 };
+XMMRegister xmm10 = { 10 };
+XMMRegister xmm11 = { 11 };
+XMMRegister xmm12 = { 12 };
+XMMRegister xmm13 = { 13 };
+XMMRegister xmm14 = { 14 };
+XMMRegister xmm15 = { 15 };
+
+// Safe default is no features.
+uint64_t CpuFeatures::supported_ = 0;
+uint64_t CpuFeatures::enabled_ = 0;
+
+void CpuFeatures::Probe() {
+ ASSERT(Heap::HasBeenSetup());
+ ASSERT(supported_ == 0);
+ if (Serializer::enabled()) return; // No features if we might serialize.
+
+ Assembler assm(NULL, 0);
+ Label cpuid, done;
+#define __ assm.
+ // Save old rsp, since we are going to modify the stack.
+ __ push(rbp);
+ __ pushfq();
+ __ push(rcx);
+ __ push(rbx);
+ __ movq(rbp, rsp);
+
+ // If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
+ __ pushfq();
+ __ pop(rax);
+ __ movq(rdx, rax);
+ __ xor_(rax, Immediate(0x200000)); // Flip bit 21.
+ __ push(rax);
+ __ popfq();
+ __ pushfq();
+ __ pop(rax);
+ __ xor_(rax, rdx); // Different if CPUID is supported.
+ __ j(not_zero, &cpuid);
+
+ // CPUID not supported. Clear the supported features in rax.
+ __ xor_(rax, rax);
+ __ jmp(&done);
+
+ // Invoke CPUID with 1 in eax to get feature information in
+ // ecx:edx. Temporarily enable CPUID support because we know it's
+ // safe here.
+ __ bind(&cpuid);
+ __ movq(rax, Immediate(1));
+ supported_ = (1 << CPUID);
+ { Scope fscope(CPUID);
+ __ cpuid();
+ }
+ supported_ = 0;
+
+ // Move the result from ecx:edx to rax and make sure to mark the
+ // CPUID feature as supported.
+ __ movl(rax, rdx); // Zero-extended to 64 bits.
+ __ shl(rcx, Immediate(32));
+ __ or_(rax, rcx);
+ __ or_(rax, Immediate(1 << CPUID));
+
+ // Done.
+ __ bind(&done);
+ __ movq(rsp, rbp);
+ __ pop(rbx);
+ __ pop(rcx);
+ __ popfq();
+ __ pop(rbp);
+ __ ret(0);
+#undef __
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code =
+ Heap::CreateCode(desc, NULL, Code::ComputeFlags(Code::STUB), NULL);
+ if (!code->IsCode()) return;
+ LOG(CodeCreateEvent("Builtin", Code::cast(code), "CpuFeatures::Probe"));
+ typedef uint64_t (*F0)();
+ F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
+ supported_ = probe();
+}
+
+// -----------------------------------------------------------------------------
+// Implementation of Assembler
+
+#ifdef GENERATED_CODE_COVERAGE
+static void InitCoverageLog();
+#endif
+
+byte* Assembler::spare_buffer_ = NULL;
+
+Assembler::Assembler(void* buffer, int buffer_size) {
+ if (buffer == NULL) {
+ // do our own buffer management
+ if (buffer_size <= kMinimalBufferSize) {
+ buffer_size = kMinimalBufferSize;
+
+ if (spare_buffer_ != NULL) {
+ buffer = spare_buffer_;
+ spare_buffer_ = NULL;
+ }
+ }
+ if (buffer == NULL) {
+ buffer_ = NewArray<byte>(buffer_size);
+ } else {
+ buffer_ = static_cast<byte*>(buffer);
+ }
+ buffer_size_ = buffer_size;
+ own_buffer_ = true;
+ } else {
+ // use externally provided buffer instead
+ ASSERT(buffer_size > 0);
+ buffer_ = static_cast<byte*>(buffer);
+ buffer_size_ = buffer_size;
+ own_buffer_ = false;
+ }
+
+ // Clear the buffer in debug mode unless it was provided by the
+ // caller in which case we can't be sure it's okay to overwrite
+ // existing code in it; see CodePatcher::CodePatcher(...).
+#ifdef DEBUG
+ if (own_buffer_) {
+ memset(buffer_, 0xCC, buffer_size); // int3
+ }
+#endif
+
+ // setup buffer pointers
+ ASSERT(buffer_ != NULL);
+ pc_ = buffer_;
+ reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
+
+ last_pc_ = NULL;
+ current_statement_position_ = RelocInfo::kNoPosition;
+ current_position_ = RelocInfo::kNoPosition;
+ written_statement_position_ = current_statement_position_;
+ written_position_ = current_position_;
+#ifdef GENERATED_CODE_COVERAGE
+ InitCoverageLog();
+#endif
+}
+
+
+Assembler::~Assembler() {
+ if (own_buffer_) {
+ if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
+ spare_buffer_ = buffer_;
+ } else {
+ DeleteArray(buffer_);
+ }
+ }
+}
+
+
+void Assembler::GetCode(CodeDesc* desc) {
+ // finalize code
+ // (at this point overflow() may be true, but the gap ensures that
+ // we are still not overlapping instructions and relocation info)
+ ASSERT(pc_ <= reloc_info_writer.pos()); // no overlap
+ // setup desc
+ desc->buffer = buffer_;
+ desc->buffer_size = buffer_size_;
+ desc->instr_size = pc_offset();
+ desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ desc->origin = this;
+
+ Counters::reloc_info_size.Increment(desc->reloc_size);
+}
+
+
+void Assembler::Align(int m) {
+ ASSERT(IsPowerOf2(m));
+ while ((pc_offset() & (m - 1)) != 0) {
+ nop();
+ }
+}
+
+
+void Assembler::bind_to(Label* L, int pos) {
+ ASSERT(!L->is_bound()); // Label may only be bound once.
+ last_pc_ = NULL;
+ ASSERT(0 <= pos && pos <= pc_offset()); // Position must be valid.
+ if (L->is_linked()) {
+ int current = L->pos();
+ int next = long_at(current);
+ while (next != current) {
+ // relative address, relative to point after address
+ int imm32 = pos - (current + sizeof(int32_t));
+ long_at_put(current, imm32);
+ current = next;
+ next = long_at(next);
+ }
+ // Fix up last fixup on linked list.
+ int last_imm32 = pos - (current + sizeof(int32_t));
+ long_at_put(current, last_imm32);
+ }
+ L->bind_to(pos);
+}
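+
+bind_to resolves forward references by walking a linked list threaded through the code buffer itself: while a label is unbound, each 32-bit displacement slot stores the position of the previously emitted reference, and the oldest slot stores its own position as the terminator. A simplified standalone model of that chain:
+
+#include <cstdint>
+#include <cstring>
+#include <vector>
+
+struct Buffer {
+  std::vector<unsigned char> code;
+  int32_t long_at(int pos) const {
+    int32_t v;
+    memcpy(&v, &code[pos], sizeof(v));
+    return v;
+  }
+  void long_at_put(int pos, int32_t v) { memcpy(&code[pos], &v, sizeof(v)); }
+};
+
+struct Label { int pos = -1; };  // -1 = unused, otherwise head of the chain
+
+void EmitDispTo(Buffer* b, Label* l) {
+  int slot = static_cast<int>(b->code.size());
+  b->code.resize(slot + 4);
+  // Link: store the previous head, or our own position if we are first.
+  b->long_at_put(slot, l->pos < 0 ? slot : l->pos);
+  l->pos = slot;
+}
+
+void Bind(Buffer* b, Label* l, int target) {
+  int current = l->pos;
+  while (current >= 0) {
+    int next = b->long_at(current);
+    // Patch to the final displacement, relative to the end of the slot.
+    b->long_at_put(current, target - (current + 4));
+    if (next == current) break;  // self-link marks the end of the chain
+    current = next;
+  }
+  l->pos = target;
+}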
+
+
+void Assembler::bind(Label* L) {
+ bind_to(L, pc_offset());
+}
+
+
+void Assembler::GrowBuffer() {
+ ASSERT(overflow()); // should not call this otherwise
+ if (!own_buffer_) FATAL("external code buffer is too small");
+
+ // compute new buffer size
+ CodeDesc desc; // the new buffer
+ if (buffer_size_ < 4*KB) {
+ desc.buffer_size = 4*KB;
+ } else {
+ desc.buffer_size = 2*buffer_size_;
+ }
+ // Some internal data structures overflow for very large buffers; they
+ // rely on kMaximalBufferSize not being too large.
+ if ((desc.buffer_size > kMaximalBufferSize) ||
+ (desc.buffer_size > Heap::OldGenerationSize())) {
+ V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
+ }
+
+ // setup new buffer
+ desc.buffer = NewArray<byte>(desc.buffer_size);
+ desc.instr_size = pc_offset();
+ desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());
+
+ // Clear the buffer in debug mode. Use 'int3' instructions to make
+ // sure to get into problems if we ever run uninitialized code.
+#ifdef DEBUG
+ memset(desc.buffer, 0xCC, desc.buffer_size);
+#endif
+
+ // copy the data
+ int pc_delta = desc.buffer - buffer_;
+ int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
+ memmove(desc.buffer, buffer_, desc.instr_size);
+ memmove(rc_delta + reloc_info_writer.pos(),
+ reloc_info_writer.pos(), desc.reloc_size);
+
+ // switch buffers
+ if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
+ spare_buffer_ = buffer_;
+ } else {
+ DeleteArray(buffer_);
+ }
+ buffer_ = desc.buffer;
+ buffer_size_ = desc.buffer_size;
+ pc_ += pc_delta;
+ if (last_pc_ != NULL) {
+ last_pc_ += pc_delta;
+ }
+ reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.last_pc() + pc_delta);
+
+ // relocate runtime entries
+ for (RelocIterator it(desc); !it.done(); it.next()) {
+ RelocInfo::Mode rmode = it.rinfo()->rmode();
+ if (rmode == RelocInfo::RUNTIME_ENTRY) {
+ int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
+ *p -= pc_delta; // relocate entry
+ } else if (rmode == RelocInfo::INTERNAL_REFERENCE) {
+ int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
+ if (*p != 0) { // 0 means uninitialized.
+ *p += pc_delta;
+ }
+ }
+ }
+
+ ASSERT(!overflow());
+}
+
+
+void Assembler::emit_operand(int rm, const Operand& adr) {
+ ASSERT_EQ(rm & 0x07, rm);
+ const unsigned length = adr.len_;
+ ASSERT(length > 0);
+
+ // Emit updated ModR/M byte containing the given register.
+ pc_[0] = (adr.buf_[0] & ~0x38) | (rm << 3);
+
+ // Emit the rest of the encoded operand.
+ for (unsigned i = 1; i < length; i++) pc_[i] = adr.buf_[i];
+ pc_ += length;
+}
+
+
+// Assembler Instruction implementations
+
+void Assembler::arithmetic_op(byte opcode, Register reg, const Operand& op) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(reg, op);
+ emit(opcode);
+ emit_operand(reg, op);
+}
+
+
+void Assembler::arithmetic_op(byte opcode, Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst, src);
+ emit(opcode);
+ emit_modrm(dst, src);
+}
+
+void Assembler::immediate_arithmetic_op(byte subcode,
+ Register dst,
+ Immediate src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ if (is_int8(src.value_)) {
+ emit(0x83);
+ emit_modrm(subcode, dst);
+ emit(src.value_);
+ } else if (dst.is(rax)) {
+ emit(0x05 | (subcode << 3));
+ emitl(src.value_);
+ } else {
+ emit(0x81);
+ emit_modrm(subcode, dst);
+ emitl(src.value_);
+ }
+}
+
+void Assembler::immediate_arithmetic_op(byte subcode,
+ const Operand& dst,
+ Immediate src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ if (is_int8(src.value_)) {
+ emit(0x83);
+ emit_operand(Register::toRegister(subcode), dst);
+ emit(src.value_);
+ } else {
+ emit(0x81);
+ emit_operand(Register::toRegister(subcode), dst);
+ emitl(src.value_);
+ }
+}
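+
+The subcode dispatch above implements the standard x64 immediate-size optimization: values in [-128, 127] use opcode 0x83 with a sign-extended 8-bit immediate, rax gets a dedicated form without a ModR/M byte (0x05 | subcode << 3), and everything else falls back to 0x81 with a full 32-bit immediate. A sketch tallying the resulting instruction lengths:
+
+#include <cstdint>
+#include <cstdio>
+
+// Length of `op reg, imm` with a REX.W prefix, per the selection above.
+int ImmediateArithLength(int reg_code, int32_t imm) {
+  if (imm >= -128 && imm <= 127) return 4;  // REX 83 /subcode ib
+  if (reg_code == 0) return 6;              // REX 05 id (rax short form)
+  return 7;                                 // REX 81 /subcode id
+}
+
+int main() {
+  printf("add rdx, 5     -> %d bytes\n", ImmediateArithLength(2, 5));
+  printf("add rdx, 0x100 -> %d bytes\n", ImmediateArithLength(2, 0x100));
+  printf("add rax, 0x100 -> %d bytes\n", ImmediateArithLength(0, 0x100));
+  return 0;
+}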
+
+
+void Assembler::shift(Register dst, Immediate shift_amount, int subcode) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT(is_uint6(shift_amount.value_)); // illegal shift count
+ if (shift_amount.value_ == 1) {
+ emit_rex_64(dst);
+ emit(0xD1);
+ emit_modrm(subcode, dst);
+ } else {
+ emit_rex_64(dst);
+ emit(0xC1);
+ emit_modrm(subcode, dst);
+ emit(shift_amount.value_);
+ }
+}
+
+
+void Assembler::shift(Register dst, int subcode) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xD3);
+ emit_modrm(subcode, dst);
+}
+
+
+void Assembler::bt(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(src, dst);
+ emit(0x0F);
+ emit(0xA3);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::bts(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(src, dst);
+ emit(0x0F);
+ emit(0xAB);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::call(Label* L) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ // 1110 1000 #32-bit disp
+ emit(0xE8);
+ if (L->is_bound()) {
+ int offset = L->pos() - pc_offset() - sizeof(int32_t);
+ ASSERT(offset <= 0);
+ emitl(offset);
+ } else if (L->is_linked()) {
+ emitl(L->pos());
+ L->link_to(pc_offset() - sizeof(int32_t));
+ } else {
+ ASSERT(L->is_unused());
+ int32_t current = pc_offset();
+ emitl(current);
+ L->link_to(current);
+ }
+}
+
+
+void Assembler::call(Register adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ // Opcode: FF /2 r64
+ if (adr.code() > 7) {
+ emit_rex_64(adr);
+ }
+ emit(0xFF);
+ emit_modrm(0x2, adr);
+}
+
+void Assembler::cpuid() {
+ ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CPUID));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x0F);
+ emit(0xA2);
+}
+
+
+void Assembler::call(const Operand& op) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ // Opcode: FF /2 m64
+ emit_rex_64(op);
+ emit(0xFF);
+ emit_operand(2, op);
+}
+
+
+void Assembler::cqo() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64();
+ emit(0x99);
+}
+
+
+void Assembler::dec(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xFF);
+ emit_modrm(0x1, dst);
+}
+
+
+void Assembler::dec(const Operand& dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xFF);
+ emit_operand(1, dst);
+}
+
+
+void Assembler::enter(Immediate size) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xC8);
+ emitw(size.value_); // 16 bit operand, always.
+ emit(0);
+}
+
+
+void Assembler::hlt() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF4);
+}
+
+
+void Assembler::idiv(Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(src);
+ emit(0xF7);
+ emit_modrm(0x7, src);
+}
+
+
+void Assembler::imul(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0xAF);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::imul(Register dst, Register src, Immediate imm) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst, src);
+ if (is_int8(imm.value_)) {
+ emit(0x6B);
+ emit_modrm(dst, src);
+ emit(imm.value_);
+ } else {
+ emit(0x69);
+ emit_modrm(dst, src);
+ emitl(imm.value_);
+ }
+}
+
+
+void Assembler::inc(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xFF);
+ emit_modrm(0x0, dst);
+}
+
+
+void Assembler::inc(const Operand& dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xFF);
+ emit_operand(0, dst);
+}
+
+
+void Assembler::int3() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xCC);
+}
+
+
+void Assembler::j(Condition cc, Label* L) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT(0 <= cc && cc < 16);
+ if (L->is_bound()) {
+ const int short_size = 2;
+ const int long_size = 6;
+ int offs = L->pos() - pc_offset();
+ ASSERT(offs <= 0);
+ if (is_int8(offs - short_size)) {
+ // 0111 tttn #8-bit disp
+ emit(0x70 | cc);
+ emit((offs - short_size) & 0xFF);
+ } else {
+ // 0000 1111 1000 tttn #32-bit disp
+ emit(0x0F);
+ emit(0x80 | cc);
+ emitl(offs - long_size);
+ }
+ } else if (L->is_linked()) {
+ // 0000 1111 1000 tttn #32-bit disp
+ emit(0x0F);
+ emit(0x80 | cc);
+ emitl(L->pos());
+ L->link_to(pc_offset() - sizeof(int32_t));
+ } else {
+ ASSERT(L->is_unused());
+ emit(0x0F);
+ emit(0x80 | cc);
+ int32_t current = pc_offset();
+ emitl(current);
+ L->link_to(current);
+ }
+}
+
+
+void Assembler::jmp(Label* L) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (L->is_bound()) {
+ int offs = L->pos() - pc_offset() - 1;
+ ASSERT(offs <= 0);
+ if (is_int8(offs - sizeof(int8_t))) {
+ // 1110 1011 #8-bit disp
+ emit(0xEB);
+ emit((offs - sizeof(int8_t)) & 0xFF);
+ } else {
+ // 1110 1001 #32-bit disp
+ emit(0xE9);
+ emitl(offs - sizeof(int32_t));
+ }
+ } else if (L->is_linked()) {
+ // 1110 1001 #32-bit disp
+ emit(0xE9);
+ emitl(L->pos());
+ L->link_to(pc_offset() - sizeof(int32_t));
+ } else {
+ // 1110 1001 #32-bit disp
+ ASSERT(L->is_unused());
+ emit(0xE9);
+ int32_t current = pc_offset();
+ emitl(current);
+ L->link_to(current);
+ }
+}
+
+
+void Assembler::jmp(Register target) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+  // Opcode: FF /4 r64
+ if (target.code() > 7) {
+ emit_rex_64(target);
+ }
+ emit(0xFF);
+ emit_modrm(0x4, target);
+}
+
+
+void Assembler::lea(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst, src);
+ emit(0x8D);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::load_rax(void* value, RelocInfo::Mode mode) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x48); // REX.W
+ emit(0xA1);
+ emitq(reinterpret_cast<uintptr_t>(value), mode);
+}
+
+
+void Assembler::load_rax(ExternalReference ref) {
+ load_rax(ref.address(), RelocInfo::EXTERNAL_REFERENCE);
+}
+
+
+void Assembler::leave() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xC9);
+}
+
+
+void Assembler::movb(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_32(dst, src);
+ emit(0x8A);
+ emit_operand(dst, src);
+}
+
+void Assembler::movb(Register dst, Immediate imm) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_32(dst);
+ emit(0xC6);
+ emit_modrm(0x0, dst);
+ emit(imm.value_);
+}
+
+void Assembler::movb(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_32(src, dst);
+ emit(0x88);
+ emit_operand(src, dst);
+}
+
+void Assembler::movl(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(dst, src);
+ emit(0x8B);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::movl(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(dst, src);
+ emit(0x8B);
+ emit_modrm(dst, src);
+}
+
+
+void Assembler::movl(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(src, dst);
+ emit(0x89);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::movl(Register dst, Immediate value) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(dst);
+ emit(0xC7);
+ emit_modrm(0x0, dst);
+ emit(value); // Only 32-bit immediates are possible, not 8-bit immediates.
+}
+
+
+void Assembler::movq(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst, src);
+ emit(0x8B);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::movq(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst, src);
+ emit(0x8B);
+ emit_modrm(dst, src);
+}
+
+
+void Assembler::movq(Register dst, Immediate value) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xC7);
+ emit_modrm(0x0, dst);
+ emit(value); // Only 32-bit immediates are possible, not 8-bit immediates.
+}
+
+
+void Assembler::movq(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(src, dst);
+ emit(0x89);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xB8 | (dst.code() & 0x7));
+ emitq(reinterpret_cast<uintptr_t>(value), rmode);
+}
+
+
+void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xB8 | (dst.code() & 0x7)); // Not a ModR/M byte.
+ emitq(value, rmode);
+}
+
+
+void Assembler::movq(Register dst, ExternalReference ref) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xB8 | (dst.code() & 0x7));
+ emitq(reinterpret_cast<uintptr_t>(ref.address()),
+ RelocInfo::EXTERNAL_REFERENCE);
+}
+
+
+void Assembler::mul(Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(src);
+ emit(0xF7);
+ emit_modrm(0x4, src);
+}
+
+
+void Assembler::neg(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xF7);
+ emit_modrm(0x3, dst);
+}
+
+
+void Assembler::neg(const Operand& dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xF7);
+ emit_operand(3, dst);
+}
+
+
+void Assembler::nop() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x90);
+}
+
+
+void Assembler::not_(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xF7);
+ emit_modrm(0x2, dst);
+}
+
+
+void Assembler::not_(const Operand& dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xF7);
+ emit_operand(2, dst);
+}
+
+
+void Assembler::nop(int n) {
+  // The recommended multi-byte sequences of NOP instructions from the Intel 64
+ // and IA-32 Architectures Software Developer's Manual.
+ //
+ // Length Assembly Byte Sequence
+ // 2 bytes 66 NOP 66 90H
+ // 3 bytes NOP DWORD ptr [EAX] 0F 1F 00H
+ // 4 bytes NOP DWORD ptr [EAX + 00H] 0F 1F 40 00H
+ // 5 bytes NOP DWORD ptr [EAX + EAX*1 + 00H] 0F 1F 44 00 00H
+ // 6 bytes 66 NOP DWORD ptr [EAX + EAX*1 + 00H] 66 0F 1F 44 00 00H
+ // 7 bytes NOP DWORD ptr [EAX + 00000000H] 0F 1F 80 00 00 00 00H
+ // 8 bytes NOP DWORD ptr [EAX + EAX*1 + 00000000H] 0F 1F 84 00 00 00 00 00H
+ // 9 bytes 66 NOP DWORD ptr [EAX + EAX*1 + 66 0F 1F 84 00 00 00 00
+ // 00000000H] 00H
+
+ ASSERT(1 <= n);
+ ASSERT(n <= 9);
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ switch (n) {
+ case 1:
+ emit(0x90);
+ return;
+ case 2:
+ emit(0x66);
+ emit(0x90);
+ return;
+ case 3:
+ emit(0x0f);
+ emit(0x1f);
+ emit(0x00);
+ return;
+ case 4:
+ emit(0x0f);
+ emit(0x1f);
+ emit(0x40);
+ emit(0x00);
+ return;
+ case 5:
+ emit(0x0f);
+ emit(0x1f);
+ emit(0x44);
+ emit(0x00);
+ emit(0x00);
+ return;
+ case 6:
+ emit(0x66);
+ emit(0x0f);
+ emit(0x1f);
+ emit(0x44);
+ emit(0x00);
+ emit(0x00);
+ return;
+ case 7:
+ emit(0x0f);
+ emit(0x1f);
+ emit(0x80);
+ emit(0x00);
+ emit(0x00);
+ emit(0x00);
+ emit(0x00);
+ return;
+ case 8:
+ emit(0x0f);
+ emit(0x1f);
+ emit(0x84);
+ emit(0x00);
+ emit(0x00);
+ emit(0x00);
+ emit(0x00);
+ emit(0x00);
+ return;
+ case 9:
+ emit(0x66);
+ emit(0x0f);
+ emit(0x1f);
+ emit(0x84);
+ emit(0x00);
+ emit(0x00);
+ emit(0x00);
+ emit(0x00);
+ emit(0x00);
+ return;
+ }
+}
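+
+
+// Multi-byte no-ops let alignment padding (from Align(), for example)
+// execute as a single instruction instead of a run of one-byte 0x90s,
+// which is cheaper to decode.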
+
+
+void Assembler::pop(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (dst.code() > 7) {
+ emit_rex_64(dst);
+ }
+ emit(0x58 | (dst.code() & 0x7));
+}
+
+
+void Assembler::pop(const Operand& dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst); // Could be omitted in some cases.
+ emit(0x8F);
+ emit_operand(0, dst);
+}
+
+
+void Assembler::popfq() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x9D);
+}
+
+
+void Assembler::push(Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (src.code() > 7) {
+ emit_rex_64(src);
+ }
+ emit(0x50 | (src.code() & 0x7));
+}
+
+
+void Assembler::push(const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(src); // Could be omitted in some cases.
+ emit(0xFF);
+ emit_operand(6, src);
+}
+
+
+void Assembler::push(Immediate value) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (is_int8(value.value_)) {
+ emit(0x6A);
+ emit(value.value_); // Emit low byte of value.
+ } else {
+ emit(0x68);
+ emitl(value.value_);
+ }
+}
+
+
+void Assembler::pushfq() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x9C);
+}
+
+
+void Assembler::rcl(Register dst, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+  ASSERT(is_uint6(imm8));  // The shift count must fit in six bits.
+ if (imm8 == 1) {
+ emit_rex_64(dst);
+ emit(0xD1);
+ emit_modrm(0x2, dst);
+ } else {
+ emit_rex_64(dst);
+ emit(0xC1);
+ emit_modrm(0x2, dst);
+ emit(imm8);
+ }
+}
+
+
+void Assembler::ret(int imm16) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT(is_uint16(imm16));
+ if (imm16 == 0) {
+ emit(0xC3);
+ } else {
+ emit(0xC2);
+ emit(imm16 & 0xFF);
+ emit((imm16 >> 8) & 0xFF);
+ }
+}
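+
+
+// With a nonzero immediate, ret emits C2 iw, which pops an additional
+// imm16 bytes off the stack after the return address is consumed.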
+
+
+void Assembler::shld(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(src, dst);
+ emit(0x0F);
+ emit(0xA5);
+ emit_modrm(src, dst);
+}
+
+
+void Assembler::shrd(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(src, dst);
+ emit(0x0F);
+ emit(0xAD);
+ emit_modrm(src, dst);
+}
+
+
+void Assembler::xchg(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (src.is(rax) || dst.is(rax)) { // Single-byte encoding
+ Register other = src.is(rax) ? dst : src;
+ emit_rex_64(other);
+ emit(0x90 | (other.code() & 0x7));
+ } else {
+ emit_rex_64(src, dst);
+ emit(0x87);
+ emit_modrm(src, dst);
+ }
+}
+
+
+void Assembler::store_rax(void* dst, RelocInfo::Mode mode) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x48); // REX.W
+ emit(0xA3);
+ emitq(reinterpret_cast<uintptr_t>(dst), mode);
+}
+
+
+void Assembler::store_rax(ExternalReference ref) {
+ store_rax(ref.address(), RelocInfo::EXTERNAL_REFERENCE);
+}
+
+
+void Assembler::testb(Register reg, Immediate mask) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (reg.is(rax)) {
+ emit(0xA8);
+ emit(mask);
+ } else {
+ if (reg.code() > 3) {
+ // Register is not one of al, bl, cl, dl. Its encoding needs REX.
+ emit_rex_32(reg);
+ }
+ emit(0xF6);
+ emit_modrm(0x0, reg);
+ emit(mask.value_); // Low byte emitted.
+ }
+}
+
+
+void Assembler::testb(const Operand& op, Immediate mask) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(rax, op);
+ emit(0xF6);
+ emit_operand(rax, op); // Operation code 0
+ emit(mask.value_); // Low byte emitted.
+}
+
+
+void Assembler::testl(Register reg, Immediate mask) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (reg.is(rax)) {
+ emit(0xA9);
+ emit(mask);
+ } else {
+ emit_optional_rex_32(rax, reg);
+ emit(0xF7);
+ emit_modrm(0x0, reg);
+ emit(mask);
+ }
+}
+
+
+void Assembler::testl(const Operand& op, Immediate mask) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(rax, op);
+ emit(0xF7);
+ emit_operand(rax, op); // Operation code 0
+ emit(mask);
+}
+
+
+void Assembler::testq(const Operand& op, Register reg) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(reg, op);
+ emit(0x85);
+ emit_operand(reg, op);
+}
+
+
+void Assembler::testq(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst, src);
+ emit(0x85);
+ emit_modrm(dst, src);
+}
+
+
+// Relocation information implementations
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+ ASSERT(rmode != RelocInfo::NONE);
+ // Don't record external references unless the heap will be serialized.
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
+ !Serializer::enabled() &&
+ !FLAG_debug_code) {
+ return;
+ }
+ RelocInfo rinfo(pc_, rmode, data);
+ reloc_info_writer.Write(&rinfo);
+}
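+
+// The reloc_info_writer emits entries backwards from the end of the code
+// buffer towards pc_; the kGap slack kept between them allows a single
+// space check per instruction.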
+
+void Assembler::RecordJSReturn() {
+ WriteRecordedPositions();
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(RelocInfo::JS_RETURN);
+}
+
+
+void Assembler::RecordComment(const char* msg) {
+ if (FLAG_debug_code) {
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
+ }
+}
+
+
+void Assembler::RecordPosition(int pos) {
+ ASSERT(pos != RelocInfo::kNoPosition);
+ ASSERT(pos >= 0);
+ current_position_ = pos;
+}
+
+
+void Assembler::RecordStatementPosition(int pos) {
+ ASSERT(pos != RelocInfo::kNoPosition);
+ ASSERT(pos >= 0);
+ current_statement_position_ = pos;
+}
+
+
+void Assembler::WriteRecordedPositions() {
+ // Write the statement position if it is different from what was written last
+ // time.
+ if (current_statement_position_ != written_statement_position_) {
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
+ written_statement_position_ = current_statement_position_;
+ }
+
+ // Write the position if it is different from what was written last time and
+ // also different from the written statement position.
+ if (current_position_ != written_position_ &&
+ current_position_ != written_statement_position_) {
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(RelocInfo::POSITION, current_position_);
+ written_position_ = current_position_;
+ }
+}
+
+
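+// The modes selected by kApplyMask have position-dependent targets that
+// must be adjusted whenever the code object containing them is moved.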
+const int RelocInfo::kApplyMask =
+ RelocInfo::kCodeTargetMask | 1 << RelocInfo::RUNTIME_ENTRY |
+ 1 << RelocInfo::JS_RETURN | 1 << RelocInfo::INTERNAL_REFERENCE;
+
+
+} } // namespace v8::internal
+
+
+// TODO(x64): Implement and move these to their correct cc-files:
+#include "ast.h"
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "cpu.h"
+#include "debug.h"
+#include "disasm.h"
+#include "disassembler.h"
+#include "frames-inl.h"
+#include "x64/macro-assembler-x64.h"
+#include "x64/regexp-macro-assembler-x64.h"
+#include "ic-inl.h"
+#include "log.h"
+#include "macro-assembler.h"
+#include "parser.h"
+#include "regexp-macro-assembler.h"
+#include "regexp-stack.h"
+#include "register-allocator-inl.h"
+#include "register-allocator.h"
+#include "runtime.h"
+#include "scopes.h"
+#include "serialize.h"
+#include "stub-cache.h"
+#include "unicode.h"
+
+namespace v8 {
+namespace internal {
+
+void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* a) {
+ UNIMPLEMENTED();
+}
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* a) {
+ UNIMPLEMENTED();
+}
+
+void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* a) {
+ UNIMPLEMENTED();
+}
+
+
+void BreakLocationIterator::ClearDebugBreakAtReturn() {
+ UNIMPLEMENTED();
+}
+
+bool BreakLocationIterator::IsDebugBreakAtReturn() {
+ UNIMPLEMENTED();
+ return false;
+}
+
+void BreakLocationIterator::SetDebugBreakAtReturn() {
+ UNIMPLEMENTED();
+}
+
+void CallIC::Generate(MacroAssembler* a, int b, ExternalReference const& c) {
+ UNIMPLEMENTED();
+}
+
+void CallIC::GenerateMegamorphic(MacroAssembler* a, int b) {
+ UNIMPLEMENTED();
+}
+
+void CallIC::GenerateNormal(MacroAssembler* a, int b) {
+ UNIMPLEMENTED();
+}
+
+Object* CallStubCompiler::CompileCallConstant(Object* a,
+ JSObject* b,
+ JSFunction* c,
+ StubCompiler::CheckType d,
+ Code::Flags flags) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+Object* CallStubCompiler::CompileCallField(Object* a,
+ JSObject* b,
+ int c,
+ String* d,
+ Code::Flags flags) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+Object* CallStubCompiler::CompileCallInterceptor(Object* a,
+ JSObject* b,
+ String* c) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+StackFrame::Type ExitFrame::GetStateForFramePointer(unsigned char* a,
+ StackFrame::State* b) {
+ // TODO(X64): UNIMPLEMENTED
+ return NONE;
+}
+
+int JavaScriptFrame::GetProvidedParametersCount() const {
+ UNIMPLEMENTED();
+ return 0;
+}
+
+void JumpTarget::DoBind(int a) {
+ UNIMPLEMENTED();
+}
+
+void JumpTarget::DoBranch(Condition a, Hint b) {
+ UNIMPLEMENTED();
+}
+
+void JumpTarget::DoJump() {
+ UNIMPLEMENTED();
+}
+
+
+Object* LoadStubCompiler::CompileLoadCallback(JSObject* a,
+ JSObject* b,
+ AccessorInfo* c,
+ String* d) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+Object* LoadStubCompiler::CompileLoadConstant(JSObject* a,
+ JSObject* b,
+ Object* c,
+ String* d) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+Object* LoadStubCompiler::CompileLoadField(JSObject* a,
+ JSObject* b,
+ int c,
+ String* d) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* a,
+ JSObject* b,
+ String* c) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+StackFrame::Type StackFrame::ComputeType(StackFrame::State* a) {
+ UNIMPLEMENTED();
+ return NONE;
+}
+
+Object* StoreStubCompiler::CompileStoreCallback(JSObject* a,
+ AccessorInfo* b,
+ String* c) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+Object* StoreStubCompiler::CompileStoreField(JSObject* a,
+ int b,
+ Map* c,
+ String* d) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* a, String* b) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+Object* StubCompiler::CompileLazyCompile(Code::Flags a) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+void VirtualFrame::Drop(int a) {
+ UNIMPLEMENTED();
+}
+
+int VirtualFrame::InvalidateFrameSlotAt(int a) {
+ UNIMPLEMENTED();
+ return -1;
+}
+
+void VirtualFrame::MergeTo(VirtualFrame* a) {
+ UNIMPLEMENTED();
+}
+
+Result VirtualFrame::Pop() {
+ UNIMPLEMENTED();
+ return Result(NULL);
+}
+
+Result VirtualFrame::RawCallStub(CodeStub* a) {
+ UNIMPLEMENTED();
+ return Result(NULL);
+}
+
+void VirtualFrame::SyncElementBelowStackPointer(int a) {
+ UNIMPLEMENTED();
+}
+
+void VirtualFrame::SyncElementByPushing(int a) {
+ UNIMPLEMENTED();
+}
+
+void VirtualFrame::SyncRange(int a, int b) {
+ UNIMPLEMENTED();
+}
+
+VirtualFrame::VirtualFrame() : elements_(0) {
+ UNIMPLEMENTED();
+}
+
+byte* ArgumentsAdaptorFrame::GetCallerStackPointer() const {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::GenerateLog(ZoneList<Expression*>* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* a) {
+ UNIMPLEMENTED();
+}
+
+void ExitFrame::Iterate(ObjectVisitor* a) const {
+ UNIMPLEMENTED();
+}
+
+byte* InternalFrame::GetCallerStackPointer() const {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+byte* JavaScriptFrame::GetCallerStackPointer() const {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
} } // namespace v8::internal
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 40fcdd32b..b4882571e 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -37,7 +37,21 @@
#ifndef V8_X64_ASSEMBLER_X64_H_
#define V8_X64_ASSEMBLER_X64_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
+
+// Utility functions
+
+// Test whether a 64-bit value is in a specific range.
+static inline bool is_uint32(int64_t x) {
+ const int64_t kUInt32Mask = V8_INT64_C(0xffffffff);
+  return x == (x & kUInt32Mask);  // Parentheses needed: == binds tighter than &.
+}
+
+static inline bool is_int32(int64_t x) {
+ const int64_t kMinIntValue = V8_INT64_C(-0x80000000);
+ return is_uint32(x - kMinIntValue);
+}
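+
+// For instance, is_uint32(0xFFFFFFFF) holds while is_uint32(0x100000000)
+// does not; is_int32 shifts the signed range [-2^31, 2^31 - 1] up to
+// [0, 2^32 - 1] and reuses the unsigned test.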
// CPU Registers.
//
@@ -60,10 +74,13 @@ namespace v8 { namespace internal {
// mode. This way we get the compile-time error checking in debug mode
// and best performance in optimized code.
//
-const int kNumRegisters = 16;
struct Register {
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
+ static Register toRegister(int code) {
+ Register r = {code};
+ return r;
+ }
+ bool is_valid() const { return 0 <= code_ && code_ < 16; }
bool is(Register reg) const { return code_ == reg.code_; }
  // The byte-register distinction of ia32 has disappeared.
bool is_byte_register() const { return false; }
@@ -98,7 +115,6 @@ extern Register r14;
extern Register r15;
extern Register no_reg;
-
struct XMMRegister {
bool is_valid() const { return 0 <= code_ && code_ < 2; }
int code() const {
@@ -117,6 +133,14 @@ extern XMMRegister xmm4;
extern XMMRegister xmm5;
extern XMMRegister xmm6;
extern XMMRegister xmm7;
+extern XMMRegister xmm8;
+extern XMMRegister xmm9;
+extern XMMRegister xmm10;
+extern XMMRegister xmm11;
+extern XMMRegister xmm12;
+extern XMMRegister xmm13;
+extern XMMRegister xmm14;
+extern XMMRegister xmm15;
enum Condition {
// any value < 0 is considered no_condition
@@ -200,34 +224,11 @@ inline Hint NegateHint(Hint hint) {
class Immediate BASE_EMBEDDED {
public:
- inline explicit Immediate(int64_t x);
- inline explicit Immediate(const char* s);
- inline explicit Immediate(const ExternalReference& ext);
- inline explicit Immediate(Handle<Object> handle);
+ explicit Immediate(int32_t value) : value_(value) {}
inline explicit Immediate(Smi* value);
- static Immediate CodeRelativeOffset(Label* label) {
- return Immediate(label);
- }
-
- bool is_zero() const { return x_ == 0 && rmode_ == RelocInfo::NONE; }
- bool is_int8() const {
- return -128 <= x_ && x_ < 128 && rmode_ == RelocInfo::NONE;
- }
- bool is_int16() const {
- return -32768 <= x_ && x_ < 32768 && rmode_ == RelocInfo::NONE;
- }
- bool is_int32() const {
- return V8_INT64_C(-2147483648) <= x_
- && x_ < V8_INT64_C(2147483648)
- && rmode_ == RelocInfo::NONE;
- }
-
private:
- inline explicit Immediate(Label* value) { UNIMPLEMENTED(); }
-
- int64_t x_;
- RelocInfo::Mode rmode_;
+ int32_t value_;
friend class Assembler;
};
@@ -237,177 +238,55 @@ class Immediate BASE_EMBEDDED {
// Machine instruction Operands
enum ScaleFactor {
- times_1 = 0,
- times_2 = 1,
- times_4 = 2,
- times_8 = 3
+ kTimes1 = 0,
+ kTimes2 = 1,
+ kTimes4 = 2,
+ kTimes8 = 3,
+ kTimesIntSize = kTimes4,
+ kTimesPointerSize = kTimes8
};
class Operand BASE_EMBEDDED {
public:
- // reg
- INLINE(explicit Operand(Register reg));
-
- // MemoryOperand
- INLINE(explicit Operand()) { UNIMPLEMENTED(); }
-
- // Returns true if this Operand is a wrapper for the specified register.
- bool is_reg(Register reg) const;
-
- // These constructors have been moved to MemOperand, and should
- // be removed from Operand as soon as all their uses use MemOperands instead.
- // [disp/r]
- INLINE(explicit Operand(intptr_t disp, RelocInfo::Mode rmode)) {
- UNIMPLEMENTED();
- }
- // disp only must always be relocated
-
// [base + disp/r]
- explicit Operand(Register base, intptr_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE);
+ INLINE(Operand(Register base, int32_t disp));
// [base + index*scale + disp/r]
- explicit Operand(Register base,
- Register index,
- ScaleFactor scale,
- intptr_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE);
+ Operand(Register base,
+ Register index,
+ ScaleFactor scale,
+ int32_t disp);
// [index*scale + disp/r]
- explicit Operand(Register index,
- ScaleFactor scale,
- intptr_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE);
-
- static Operand StaticVariable(const ExternalReference& ext) {
- return Operand(reinterpret_cast<intptr_t>(ext.address()),
- RelocInfo::EXTERNAL_REFERENCE);
- }
-
- static Operand StaticArray(Register index,
- ScaleFactor scale,
- const ExternalReference& arr) {
- return Operand(index, scale, reinterpret_cast<intptr_t>(arr.address()),
- RelocInfo::EXTERNAL_REFERENCE);
- }
-
- // End of constructors and methods that have been moved to MemOperand.
+ Operand(Register index,
+ ScaleFactor scale,
+ int32_t disp);
private:
byte rex_;
byte buf_[10];
// The number of bytes in buf_.
unsigned int len_;
- // Only valid if len_ > 4.
RelocInfo::Mode rmode_;
- // Set the ModRM byte without an encoded 'reg' register. The
+ // Set the ModR/M byte without an encoded 'reg' register. The
// register is encoded later as part of the emit_operand operation.
+ // set_modrm can be called before or after set_sib and set_disp*.
inline void set_modrm(int mod, Register rm);
+ // Set the SIB byte if one is needed. Sets the length to 2 rather than 1.
inline void set_sib(ScaleFactor scale, Register index, Register base);
- inline void set_disp8(int8_t disp);
- inline void set_disp32(int32_t disp);
- inline void set_dispr(intptr_t disp, RelocInfo::Mode rmode);
-
- friend class Assembler;
-};
-
-class MemOperand : public Operand {
- public:
- // [disp/r]
- INLINE(explicit MemOperand(intptr_t disp, RelocInfo::Mode rmode)) :
- Operand() {
- UNIMPLEMENTED();
- }
- // disp only must always be relocated
-
- // [base + disp/r]
- explicit MemOperand(Register base, intptr_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE);
-
- // [base + index*scale + disp/r]
- explicit MemOperand(Register base,
- Register index,
- ScaleFactor scale,
- intptr_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE);
- // [index*scale + disp/r]
- explicit MemOperand(Register index,
- ScaleFactor scale,
- intptr_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE);
-
- static MemOperand StaticVariable(const ExternalReference& ext) {
- return MemOperand(reinterpret_cast<intptr_t>(ext.address()),
- RelocInfo::EXTERNAL_REFERENCE);
- }
-
- static MemOperand StaticArray(Register index,
- ScaleFactor scale,
- const ExternalReference& arr) {
- return MemOperand(index, scale, reinterpret_cast<intptr_t>(arr.address()),
- RelocInfo::EXTERNAL_REFERENCE);
- }
-};
-
-// -----------------------------------------------------------------------------
-// A Displacement describes the 32bit immediate field of an instruction which
-// may be used together with a Label in order to refer to a yet unknown code
-// position. Displacements stored in the instruction stream are used to describe
-// the instruction and to chain a list of instructions using the same Label.
-// A Displacement contains 2 different fields:
-//
-// next field: position of next displacement in the chain (0 = end of list)
-// type field: instruction type
-//
-// A next value of null (0) indicates the end of a chain (note that there can
-// be no displacement at position zero, because there is always at least one
-// instruction byte before the displacement).
-//
-// Displacement _data field layout
-//
-// |31.....2|1......0|
-// [ next | type |
-
-class Displacement BASE_EMBEDDED {
- public:
- enum Type {
- UNCONDITIONAL_JUMP,
- CODE_RELATIVE,
- OTHER
- };
-
- int data() const { return data_; }
- Type type() const { return TypeField::decode(data_); }
- void next(Label* L) const {
- int n = NextField::decode(data_);
- n > 0 ? L->link_to(n) : L->Unuse();
- }
- void link_to(Label* L) { init(L, type()); }
-
- explicit Displacement(int data) { data_ = data; }
-
- Displacement(Label* L, Type type) { init(L, type); }
+ // Adds operand displacement fields (offsets added to the memory address).
+ // Needs to be called after set_sib, not before it.
+ inline void set_disp8(int disp);
+ inline void set_disp32(int disp);
- void print() {
- PrintF("%s (%x) ", (type() == UNCONDITIONAL_JUMP ? "jmp" : "[other]"),
- NextField::decode(data_));
- }
-
- private:
- int data_;
-
- class TypeField: public BitField<Type, 0, 2> {};
- class NextField: public BitField<int, 2, 32-2> {};
-
- void init(Label* L, Type type);
+ friend class Assembler;
};
-
// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a Scope before use.
// Example:
@@ -428,11 +307,11 @@ class CpuFeatures : public AllStatic {
static void Probe();
// Check whether a feature is supported by the target CPU.
static bool IsSupported(Feature f) {
- return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
+ return (supported_ & (V8_UINT64_C(1) << f)) != 0;
}
// Check whether a feature is currently enabled.
static bool IsEnabled(Feature f) {
- return (enabled_ & (static_cast<uint64_t>(1) << f)) != 0;
+ return (enabled_ & (V8_UINT64_C(1) << f)) != 0;
}
// Enable a specified feature within a scope.
class Scope BASE_EMBEDDED {
@@ -441,7 +320,7 @@ class CpuFeatures : public AllStatic {
explicit Scope(Feature f) {
ASSERT(CpuFeatures::IsSupported(f));
old_enabled_ = CpuFeatures::enabled_;
- CpuFeatures::enabled_ |= (static_cast<uint64_t>(1) << f);
+ CpuFeatures::enabled_ |= (V8_UINT64_C(1) << f);
}
~Scope() { CpuFeatures::enabled_ = old_enabled_; }
private:
@@ -461,7 +340,8 @@ class Assembler : public Malloced {
private:
// The relocation writer's position is kGap bytes below the end of
// the generated instructions. This leaves enough space for the
- // longest possible ia32 instruction (17 bytes as of 9/26/06) and
+  // longest possible x64 instruction (there is a 15-byte limit on
+  // instruction length, ruling out some otherwise valid instructions) and
// allows for a single, fast space check per instruction.
static const int kGap = 32;
@@ -488,8 +368,9 @@ class Assembler : public Malloced {
void GetCode(CodeDesc* desc);
// Read/Modify the code target in the branch/call instruction at pc.
- inline static Address target_address_at(Address pc);
- inline static void set_target_address_at(Address pc, Address target);
+ // On the x64 architecture, the address is absolute, not relative.
+ static inline Address target_address_at(Address pc);
+ static inline void set_target_address_at(Address pc, Address target);
// Distance between the address of the code target in the call instruction
// and the return address
@@ -499,22 +380,20 @@ class Assembler : public Malloced {
// ---------------------------------------------------------------------------
// Code generation
//
- // - function names correspond one-to-one to ia32 instruction mnemonics
- // - unless specified otherwise, instructions operate on 32bit operands
- // - instructions on 8bit (byte) operands/registers have a trailing '_b'
- // - instructions on 16bit (word) operands/registers have a trailing '_w'
- // - naming conflicts with C++ keywords are resolved via a trailing '_'
-
- // NOTE ON INTERFACE: Currently, the interface is not very consistent
- // in the sense that some operations (e.g. mov()) can be called in more
- // the one way to generate the same instruction: The Register argument
- // can in some cases be replaced with an Operand(Register) argument.
- // This should be cleaned up and made more orthogonal. The questions
- // is: should we always use Operands instead of Registers where an
- // Operand is possible, or should we have a Register (overloaded) form
- // instead? We must be careful to make sure that the selected instruction
- // is obvious from the parameters to avoid hard-to-find code generation
- // bugs.
+ // Function names correspond one-to-one to x64 instruction mnemonics.
+ // Unless specified otherwise, instructions operate on 64-bit operands.
+ //
+ // If we need versions of an assembly instruction that operate on different
+ // width arguments, we add a single-letter suffix specifying the width.
+ // This is done for the following instructions: mov, cmp.
+ // There are no versions of these instructions without the suffix.
+ // - Instructions on 8-bit (byte) operands/registers have a trailing 'b'.
+ // - Instructions on 16-bit (word) operands/registers have a trailing 'w'.
+ // - Instructions on 32-bit (doubleword) operands/registers use 'l'.
+ // - Instructions on 64-bit (quadword) operands/registers use 'q'.
+ //
+ // Some mnemonics, such as "and", are the same as C++ keywords.
+ // Naming conflicts with C++ keywords are resolved by adding a trailing '_'.
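+  //
+  // For example, movq(rax, rbx) copies a full quadword between registers,
+  // while movl(rax, Immediate(1)) writes a 32-bit value that the CPU
+  // zero-extends to 64 bits.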
// Insert the smallest number of nop instructions
// possible to align the pc offset to a multiple
@@ -522,13 +401,10 @@ class Assembler : public Malloced {
void Align(int m);
// Stack
- void pushad();
- void popad();
+ void pushfq();
+ void popfq();
- void pushfd();
- void popfd();
-
- void push(const Immediate& x);
+ void push(Immediate value);
void push(Register src);
void push(const Operand& src);
void push(Label* label, RelocInfo::Mode relocation_mode);
@@ -536,25 +412,42 @@ class Assembler : public Malloced {
void pop(Register dst);
void pop(const Operand& dst);
- void enter(const Immediate& size);
+ void enter(Immediate size);
void leave();
// Moves
- void mov_b(Register dst, const Operand& src);
- void mov_b(const Operand& dst, int8_t imm8);
- void mov_b(const Operand& dst, Register src);
-
- void mov_w(Register dst, const Operand& src);
- void mov_w(const Operand& dst, Register src);
-
- void mov(Register dst, int32_t imm32);
- void mov(Register dst, const Immediate& x);
- void mov(Register dst, Handle<Object> handle);
- void mov(Register dst, const Operand& src);
- void mov(Register dst, Register src);
- void mov(const Operand& dst, const Immediate& x);
- void mov(const Operand& dst, Handle<Object> handle);
- void mov(const Operand& dst, Register src);
+ void movb(Register dst, const Operand& src);
+ void movb(Register dst, Immediate imm);
+ void movb(const Operand& dst, Register src);
+
+ void movl(Register dst, Register src);
+ void movl(Register dst, const Operand& src);
+ void movl(const Operand& dst, Register src);
+ // Load a 32-bit immediate value, zero-extended to 64 bits.
+ void movl(Register dst, Immediate imm32);
+
+ void movq(Register dst, int32_t imm32);
+ void movq(Register dst, const Operand& src);
+ // Sign extends immediate 32-bit value to 64 bits.
+ void movq(Register dst, Immediate x);
+ void movq(Register dst, Register src);
+
+ // Move 64 bit register value to 64-bit memory location.
+ void movq(const Operand& dst, Register src);
+
+ // New x64 instructions to load a 64-bit immediate into a register.
+ // All 64-bit immediates must have a relocation mode.
+ void movq(Register dst, void* ptr, RelocInfo::Mode rmode);
+ void movq(Register dst, int64_t value, RelocInfo::Mode rmode);
+ void movq(Register dst, const char* s, RelocInfo::Mode rmode);
+ // Moves the address of the external reference into the register.
+ void movq(Register dst, ExternalReference ext);
+ void movq(Register dst, Handle<Object> handle, RelocInfo::Mode rmode);
+
+
+ // New x64 instruction to load from an immediate 64-bit pointer into RAX.
+ void load_rax(void* ptr, RelocInfo::Mode rmode);
+ void load_rax(ExternalReference ext);
void movsx_b(Register dst, const Operand& src);
@@ -573,84 +466,208 @@ class Assembler : public Malloced {
void xchg(Register dst, Register src);
// Arithmetics
- void adc(Register dst, int32_t imm32);
- void adc(Register dst, const Operand& src);
+ void add(Register dst, Register src) {
+ arithmetic_op(0x03, dst, src);
+ }
- void add(Register dst, const Operand& src);
- void add(const Operand& dst, const Immediate& x);
+ void add(Register dst, const Operand& src) {
+ arithmetic_op(0x03, dst, src);
+ }
- void and_(Register dst, int32_t imm32);
- void and_(Register dst, const Operand& src);
- void and_(const Operand& src, Register dst);
- void and_(const Operand& dst, const Immediate& x);
+
+ void add(const Operand& dst, Register src) {
+ arithmetic_op(0x01, src, dst);
+ }
+
+ void add(Register dst, Immediate src) {
+ immediate_arithmetic_op(0x0, dst, src);
+ }
+
+ void add(const Operand& dst, Immediate src) {
+ immediate_arithmetic_op(0x0, dst, src);
+ }
+
+ void cmp(Register dst, Register src) {
+ arithmetic_op(0x3B, dst, src);
+ }
+
+ void cmp(Register dst, const Operand& src) {
+ arithmetic_op(0x3B, dst, src);
+ }
+
+ void cmp(const Operand& dst, Register src) {
+ arithmetic_op(0x39, src, dst);
+ }
+
+ void cmp(Register dst, Immediate src) {
+ immediate_arithmetic_op(0x7, dst, src);
+ }
+
+ void cmp(const Operand& dst, Immediate src) {
+ immediate_arithmetic_op(0x7, dst, src);
+ }
+
+ void and_(Register dst, Register src) {
+ arithmetic_op(0x23, dst, src);
+ }
+
+ void and_(Register dst, const Operand& src) {
+ arithmetic_op(0x23, dst, src);
+ }
+
+ void and_(const Operand& dst, Register src) {
+ arithmetic_op(0x21, src, dst);
+ }
+
+ void and_(Register dst, Immediate src) {
+ immediate_arithmetic_op(0x4, dst, src);
+ }
+
+ void and_(const Operand& dst, Immediate src) {
+ immediate_arithmetic_op(0x4, dst, src);
+ }
void cmpb(const Operand& op, int8_t imm8);
void cmpb_al(const Operand& op);
void cmpw_ax(const Operand& op);
void cmpw(const Operand& op, Immediate imm16);
- void cmp(Register reg, int32_t imm32);
- void cmp(Register reg, Handle<Object> handle);
- void cmp(Register reg, const Operand& op);
- void cmp(const Operand& op, const Immediate& imm);
void dec_b(Register dst);
void dec(Register dst);
void dec(const Operand& dst);
- void cdq();
+ // Sign-extends rax into rdx:rax.
+ void cqo();
+ // Divide rdx:rax by src. Quotient in rax, remainder in rdx.
void idiv(Register src);
+ void imul(Register dst, Register src);
void imul(Register dst, const Operand& src);
- void imul(Register dst, Register src, int32_t imm32);
+ // Performs the operation dst = src * imm.
+ void imul(Register dst, Register src, Immediate imm);
void inc(Register dst);
void inc(const Operand& dst);
void lea(Register dst, const Operand& src);
+ // Multiply rax by src, put the result in rdx:rax.
void mul(Register src);
void neg(Register dst);
+ void neg(const Operand& dst);
void not_(Register dst);
+ void not_(const Operand& dst);
+
+ void or_(Register dst, Register src) {
+ arithmetic_op(0x0B, dst, src);
+ }
+
+ void or_(Register dst, const Operand& src) {
+ arithmetic_op(0x0B, dst, src);
+ }
+
+ void or_(const Operand& dst, Register src) {
+ arithmetic_op(0x09, src, dst);
+ }
+
+ void or_(Register dst, Immediate src) {
+ immediate_arithmetic_op(0x1, dst, src);
+ }
+
+ void or_(const Operand& dst, Immediate src) {
+ immediate_arithmetic_op(0x1, dst, src);
+ }
- void or_(Register dst, int32_t imm32);
- void or_(Register dst, const Operand& src);
- void or_(const Operand& dst, Register src);
- void or_(const Operand& dst, const Immediate& x);
void rcl(Register dst, uint8_t imm8);
- void sar(Register dst, uint8_t imm8);
- void sar(Register dst);
+ // Shifts dst:src left by cl bits, affecting only dst.
+ void shld(Register dst, Register src);
- void sbb(Register dst, const Operand& src);
+ // Shifts src:dst right by cl bits, affecting only dst.
+ void shrd(Register dst, Register src);
- void shld(Register dst, const Operand& src);
+ // Shifts dst right, duplicating sign bit, by shift_amount bits.
+ // Shifting by 1 is handled efficiently.
+ void sar(Register dst, Immediate shift_amount) {
+ shift(dst, shift_amount, 0x7);
+ }
- void shl(Register dst, uint8_t imm8);
- void shl(Register dst);
+ // Shifts dst right, duplicating sign bit, by cl % 64 bits.
+ void sar(Register dst) {
+ shift(dst, 0x7);
+ }
- void shrd(Register dst, const Operand& src);
+ void shl(Register dst, Immediate shift_amount) {
+ shift(dst, shift_amount, 0x4);
+ }
- void shr(Register dst, uint8_t imm8);
- void shr(Register dst);
- void shr_cl(Register dst);
+ void shl(Register dst) {
+ shift(dst, 0x4);
+ }
- void sub(const Operand& dst, const Immediate& x);
- void sub(Register dst, const Operand& src);
- void sub(const Operand& dst, Register src);
+ void shr(Register dst, Immediate shift_amount) {
+ shift(dst, shift_amount, 0x5);
+ }
- void test(Register reg, const Immediate& imm);
- void test(Register reg, const Operand& op);
- void test(const Operand& op, const Immediate& imm);
+ void shr(Register dst) {
+ shift(dst, 0x5);
+ }
+
+ void store_rax(void* dst, RelocInfo::Mode mode);
+ void store_rax(ExternalReference ref);
+
+ void sub(Register dst, Register src) {
+ arithmetic_op(0x2B, dst, src);
+ }
+
+ void sub(Register dst, const Operand& src) {
+ arithmetic_op(0x2B, dst, src);
+ }
+
+ void sub(const Operand& dst, Register src) {
+ arithmetic_op(0x29, src, dst);
+ }
+
+ void sub(Register dst, Immediate src) {
+ immediate_arithmetic_op(0x5, dst, src);
+ }
+
+ void sub(const Operand& dst, Immediate src) {
+ immediate_arithmetic_op(0x5, dst, src);
+ }
+
+ void testb(Register reg, Immediate mask);
+ void testb(const Operand& op, Immediate mask);
+ void testl(Register reg, Immediate mask);
+ void testl(const Operand& op, Immediate mask);
+ void testq(const Operand& op, Register reg);
+ void testq(Register dst, Register src);
+
+ void xor_(Register dst, Register src) {
+ arithmetic_op(0x33, dst, src);
+ }
+
+ void xor_(Register dst, const Operand& src) {
+ arithmetic_op(0x33, dst, src);
+ }
+
+ void xor_(const Operand& dst, Register src) {
+ arithmetic_op(0x31, src, dst);
+ }
+
+ void xor_(Register dst, Immediate src) {
+ immediate_arithmetic_op(0x6, dst, src);
+ }
+
+ void xor_(const Operand& dst, Immediate src) {
+ immediate_arithmetic_op(0x6, dst, src);
+ }
- void xor_(Register dst, int32_t imm32);
- void xor_(Register dst, const Operand& src);
- void xor_(const Operand& src, Register dst);
- void xor_(const Operand& dst, const Immediate& x);
// Bit operations.
void bt(const Operand& dst, Register src);
@@ -660,6 +677,7 @@ class Assembler : public Malloced {
void hlt();
void int3();
void nop();
+ void nop(int n);
void rdtsc();
void ret(int imm16);
@@ -681,21 +699,26 @@ class Assembler : public Malloced {
void bind(Label* L); // binds an unbound label L to the current code position
// Calls
+ // Call near relative 32-bit displacement, relative to next instruction.
void call(Label* L);
- void call(byte* entry, RelocInfo::Mode rmode);
- void call(const Operand& adr);
- void call(Handle<Code> code, RelocInfo::Mode rmode);
+
+ // Call near absolute indirect, address in register
+ void call(Register adr);
+
+ // Call near indirect
+ void call(const Operand& operand);
// Jumps
+ // Jump short or near relative.
void jmp(Label* L); // unconditional jump to L
- void jmp(byte* entry, RelocInfo::Mode rmode);
- void jmp(const Operand& adr);
- void jmp(Handle<Code> code, RelocInfo::Mode rmode);
+
+ // Jump near absolute indirect (r64)
+ void jmp(Register adr);
// Conditional jumps
- void j(Condition cc, Label* L, Hint hint = no_hint);
- void j(Condition cc, byte* entry, RelocInfo::Mode rmode, Hint hint = no_hint);
- void j(Condition cc, Handle<Code> code, Hint hint = no_hint);
+ void j(Condition cc, Label* L);
+ void j(Condition cc, byte* entry, RelocInfo::Mode rmode);
+ void j(Condition cc, Handle<Code> code);
// Floating-point operations
void fld(int i);
@@ -788,9 +811,13 @@ class Assembler : public Malloced {
void RecordStatementPosition(int pos);
void WriteRecordedPositions();
- // Writes a single word of data in the code stream.
+ // Writes a doubleword of data in the code stream.
+ // Used for inline tables, e.g., jump-tables.
+ void dd(uint32_t data);
+
+ // Writes a quadword of data in the code stream.
// Used for inline tables, e.g., jump-tables.
- void dd(uint32_t data, RelocInfo::Mode reloc_info);
+ void dd(uint64_t data, RelocInfo::Mode reloc_info);
// Writes the absolute address of a bound label at the given position in
// the generated code. That positions should have the relocation mode
@@ -833,25 +860,119 @@ class Assembler : public Malloced {
// code emission
void GrowBuffer();
- inline void emit(uint32_t x);
+
+ void emit(byte x) { *pc_++ = x; }
+ inline void emitl(uint32_t x);
inline void emit(Handle<Object> handle);
- inline void emit(uint32_t x, RelocInfo::Mode rmode);
- inline void emit(const Immediate& x);
- inline void emit_w(const Immediate& x);
+ inline void emitq(uint64_t x, RelocInfo::Mode rmode);
+ inline void emitw(uint16_t x);
+ void emit(Immediate x) { emitl(x.value_); }
+
+ // Emits a REX prefix that encodes a 64-bit operand size and
+ // the top bit of both register codes.
+ // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
+ // REX.W is set.
+ inline void emit_rex_64(Register reg, Register rm_reg);
+
+ // Emits a REX prefix that encodes a 64-bit operand size and
+ // the top bit of the destination, index, and base register codes.
+ // The high bit of reg is used for REX.R, the high bit of op's base
+ // register is used for REX.B, and the high bit of op's index register
+ // is used for REX.X. REX.W is set.
+ inline void emit_rex_64(Register reg, const Operand& op);
+
+ // Emits a REX prefix that encodes a 64-bit operand size and
+ // the top bit of the register code.
+ // The high bit of register is used for REX.B.
+ // REX.W is set and REX.R and REX.X are clear.
+ inline void emit_rex_64(Register rm_reg);
+
+ // Emits a REX prefix that encodes a 64-bit operand size and
+ // the top bit of the index and base register codes.
+ // The high bit of op's base register is used for REX.B, and the high
+ // bit of op's index register is used for REX.X.
+ // REX.W is set and REX.R clear.
+ inline void emit_rex_64(const Operand& op);
+
+ // Emit a REX prefix that only sets REX.W to choose a 64-bit operand size.
+ void emit_rex_64() { emit(0x48); }
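+
+  // (For reference: a REX prefix has the bit layout 0100WRXB, so the
+  // W-only form above is 0x48 = 0100 1000.)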
+
+ // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
+ // REX.W is clear.
+ inline void emit_rex_32(Register reg, Register rm_reg);
+
+ // The high bit of reg is used for REX.R, the high bit of op's base
+ // register is used for REX.B, and the high bit of op's index register
+ // is used for REX.X. REX.W is cleared.
+ inline void emit_rex_32(Register reg, const Operand& op);
+
+ // High bit of rm_reg goes to REX.B.
+ // REX.W, REX.R and REX.X are clear.
+ inline void emit_rex_32(Register rm_reg);
+
+ // High bit of base goes to REX.B and high bit of index to REX.X.
+ // REX.W and REX.R are clear.
+ inline void emit_rex_32(const Operand& op);
+
+ // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
+ // REX.W is cleared. If no REX bits are set, no byte is emitted.
+ inline void emit_optional_rex_32(Register reg, Register rm_reg);
+
+ // The high bit of reg is used for REX.R, the high bit of op's base
+ // register is used for REX.B, and the high bit of op's index register
+ // is used for REX.X. REX.W is cleared. If no REX bits are set, nothing
+ // is emitted.
+ inline void emit_optional_rex_32(Register reg, const Operand& op);
+
+ // Optionally do as emit_rex_32(Register) if the register number has
+ // the high bit set.
+ inline void emit_optional_rex_32(Register rm_reg);
+
+ // Optionally do as emit_rex_32(const Operand&) if the operand register
+ // numbers have a high bit set.
+ inline void emit_optional_rex_32(const Operand& op);
+
+
+ // Emit the ModR/M byte, and optionally the SIB byte and
+ // 1- or 4-byte offset for a memory operand. Also encodes
+ // the second operand of the operation, a register or operation
+ // subcode, into the reg field of the ModR/M byte.
+ void emit_operand(Register reg, const Operand& adr) {
+ emit_operand(reg.code() & 0x07, adr);
+ }
- // Emit the code-object-relative offset of the label's position
- inline void emit_code_relative_offset(Label* label);
+ // Emit the ModR/M byte, and optionally the SIB byte and
+ // 1- or 4-byte offset for a memory operand. Also used to encode
+ // a three-bit opcode extension into the ModR/M byte.
+ void emit_operand(int rm, const Operand& adr);
- // instruction generation
- void emit_arith_b(int op1, int op2, Register dst, int imm8);
+ // Emit a ModR/M byte with registers coded in the reg and rm_reg fields.
+ void emit_modrm(Register reg, Register rm_reg) {
+ emit(0xC0 | (reg.code() & 0x7) << 3 | (rm_reg.code() & 0x7));
+ }
- // Emit a basic arithmetic instruction (i.e. first byte of the family is 0x81)
- // with a given destination expression and an immediate operand. It attempts
- // to use the shortest encoding possible.
- // sel specifies the /n in the modrm byte (see the Intel PRM).
- void emit_arith(int sel, Operand dst, const Immediate& x);
+ // Emit a ModR/M byte with an operation subcode in the reg field and
+ // a register in the rm_reg field.
+ void emit_modrm(int code, Register rm_reg) {
+ ASSERT((code & ~0x7) == 0);
+ emit(0xC0 | (code & 0x7) << 3 | (rm_reg.code() & 0x7));
+ }
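+
+  // (For reference: a ModR/M byte packs mod into bits 7..6, reg/opcode
+  // into bits 5..3 and r/m into bits 2..0; the 0xC0 above selects
+  // mod == 11, i.e. register-direct addressing.)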
- void emit_operand(Register reg, const Operand& adr);
+ // Emit the code-object-relative offset of the label's position
+ inline void emit_code_relative_offset(Label* label);
+
+ // Emit machine code for one of the operations ADD, ADC, SUB, SBC,
+ // AND, OR, XOR, or CMP. The encodings of these operations are all
+ // similar, differing just in the opcode or in the reg field of the
+ // ModR/M byte.
+ void arithmetic_op(byte opcode, Register dst, Register src);
+ void arithmetic_op(byte opcode, Register reg, const Operand& op);
+ void immediate_arithmetic_op(byte subcode, Register dst, Immediate src);
+ void immediate_arithmetic_op(byte subcode, const Operand& dst, Immediate src);
+ // Emit machine code for a shift operation.
+ void shift(Register dst, Immediate shift_amount, int subcode);
+ // Shift dst by cl % 64 bits.
+ void shift(Register dst, int subcode);
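+  // (The shift subcodes select the /n opcode extension: 4 = shl, 5 = shr,
+  // 7 = sar, matching the inline wrappers above.)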
void emit_farith(int b1, int b2, int i);
@@ -860,11 +981,6 @@ class Assembler : public Malloced {
void bind_to(Label* L, int pos);
void link_to(Label* L, Label* appendix);
- // displacements
- inline Displacement disp_at(Label* L);
- inline void disp_at_put(Label* L, Displacement disp);
- inline void emit_disp(Label* L, Displacement::Type type);
-
// record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
@@ -877,6 +993,8 @@ class Assembler : public Malloced {
int buffer_size_;
// True if the assembler owns the buffer, false if buffer is external.
bool own_buffer_;
+ // A previously allocated buffer of kMinimalBufferSize bytes, or NULL.
+ static byte* spare_buffer_;
// code generation
byte* pc_; // the program counter; moves forward
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index 209aa2d30..3f1cd9fab 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -25,3 +25,41 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
+#include "codegen-inl.h"
+
+namespace v8 {
+namespace internal {
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm,
+ Builtins::CFunctionId id) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+} } // namespace v8::internal
+
+
diff --git a/deps/v8/src/x64/codegen-x64-inl.h b/deps/v8/src/x64/codegen-x64-inl.h
new file mode 100644
index 000000000..0d5b0e21d
--- /dev/null
+++ b/deps/v8/src/x64/codegen-x64-inl.h
@@ -0,0 +1,42 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_X64_CODEGEN_X64_INL_H_
+#define V8_X64_CODEGEN_X64_INL_H_
+
+namespace v8 {
+namespace internal {
+
+// Platform-specific inline functions.
+
+void DeferredCode::Jump() { UNIMPLEMENTED(); }
+void DeferredCode::Branch(Condition cc) { UNIMPLEMENTED(); }
+
+} } // namespace v8::internal
+
+#endif // V8_X64_CODEGEN_X64_INL_H_
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 209aa2d30..ca58e09a7 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -25,3 +25,312 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+#include "macro-assembler.h"
+#include "register-allocator-inl.h"
+#include "codegen.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// Platform-specific DeferredCode functions.
+
+void DeferredCode::SaveRegisters() { UNIMPLEMENTED(); }
+
+void DeferredCode::RestoreRegisters() { UNIMPLEMENTED(); }
+
+
+CodeGenerator::CodeGenerator(int buffer_size,
+ Handle<Script> script,
+ bool is_eval)
+ : is_eval_(is_eval),
+ script_(script),
+ deferred_(8),
+ masm_(new MacroAssembler(NULL, buffer_size)),
+ scope_(NULL),
+ frame_(NULL),
+ allocator_(NULL),
+ state_(NULL),
+ loop_nesting_(0),
+ function_return_is_shadowed_(false),
+ in_spilled_code_(false) {
+}
+
+#define __ masm->
+
+
+void CodeGenerator::DeclareGlobals(Handle<FixedArray> a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::GenCode(FunctionLiteral* a) {
+ masm_->int3(); // UNIMPLEMENTED
+}
+
+void CodeGenerator::GenerateFastCaseSwitchJumpTable(SwitchStatement* a,
+ int b,
+ int c,
+ Label* d,
+ Vector<Label*> e,
+ Vector<Label> f) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitStatements(ZoneList<Statement*>* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitBlock(Block* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitDeclaration(Declaration* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitExpressionStatement(ExpressionStatement* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitEmptyStatement(EmptyStatement* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitIfStatement(IfStatement* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitContinueStatement(ContinueStatement* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitBreakStatement(BreakStatement* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitReturnStatement(ReturnStatement* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitWithExitStatement(WithExitStatement* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitSwitchStatement(SwitchStatement* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitLoopStatement(LoopStatement* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitForInStatement(ForInStatement* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitTryCatch(TryCatch* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitTryFinally(TryFinally* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitFunctionBoilerplateLiteral(
+ FunctionBoilerplateLiteral* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitConditional(Conditional* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitSlot(Slot* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitVariableProxy(VariableProxy* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitLiteral(Literal* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitObjectLiteral(ObjectLiteral* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitArrayLiteral(ArrayLiteral* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitAssignment(Assignment* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitThrow(Throw* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitProperty(Property* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitCall(Call* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitCallEval(CallEval* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitCallNew(CallNew* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitCallRuntime(CallRuntime* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitUnaryOperation(UnaryOperation* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitCountOperation(CountOperation* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitBinaryOperation(BinaryOperation* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitCompareOperation(CompareOperation* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitThisFunction(ThisFunction* a) {
+ UNIMPLEMENTED();
+}
+
+
+void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
+ masm->int3(); // TODO(X64): UNIMPLEMENTED.
+}
+
+
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+ Label invoke, exit;
+
+ // Setup frame.
+ __ push(rbp);
+ __ movq(rbp, rsp);
+
+ // Save callee-saved registers (X64 calling conventions).
+ int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ // Push something that is not an arguments adaptor.
+ __ push(Immediate(ArgumentsAdaptorFrame::NON_SENTINEL));
+ __ push(Immediate(Smi::FromInt(marker))); // @ function offset
+ __ push(r12);
+ __ push(r13);
+ __ push(r14);
+ __ push(r15);
+ __ push(rdi);
+ __ push(rsi);
+ __ push(rbx);
+ // TODO(X64): Push XMM6-XMM15 (low 64 bits) as well, or make them
+ // callee-save in JS code as well.
+
+ // Save copies of the top frame descriptor on the stack.
+ ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
+ __ load_rax(c_entry_fp);
+ __ push(rax);
+
+ // Call a faked try-block that does the invoke.
+ __ call(&invoke);
+
+ // Caught exception: Store result (exception) in the pending
+ // exception field in the JSEnv and return a failure sentinel.
+ ExternalReference pending_exception(Top::k_pending_exception_address);
+ __ store_rax(pending_exception);
+ __ movq(rax, Failure::Exception(), RelocInfo::NONE);
+ __ jmp(&exit);
+
+ // Invoke: Link this frame into the handler chain.
+ __ bind(&invoke);
+ __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+ __ push(rax); // flush TOS
+
+ // Clear any pending exceptions.
+ __ load_rax(ExternalReference::the_hole_value_location());
+ __ store_rax(pending_exception);
+
+ // Fake a receiver (NULL).
+ __ push(Immediate(0)); // receiver
+
+ // Invoke the function by calling through JS entry trampoline
+ // builtin and pop the faked function when we return. We load the address
+ // from an external reference instead of inlining the call target address
+ // directly in the code, because the builtin stubs may not have been
+ // generated yet at the time this code is generated.
+ if (is_construct) {
+ ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
+ __ load_rax(construct_entry);
+ } else {
+ ExternalReference entry(Builtins::JSEntryTrampoline);
+ __ load_rax(entry);
+ }
+ __ call(FieldOperand(rax, Code::kHeaderSize));
+
+ // Unlink this frame from the handler chain.
+ __ movq(kScratchRegister, ExternalReference(Top::k_handler_address));
+ __ pop(Operand(kScratchRegister, 0));
+ // Pop next_sp.
+ __ add(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+
+ // Restore the top frame descriptor from the stack.
+ __ bind(&exit);
+ __ movq(kScratchRegister, ExternalReference(Top::k_c_entry_fp_address));
+ __ pop(Operand(kScratchRegister, 0));
+
+ // Restore callee-saved registers (X64 conventions).
+ __ pop(rbx);
+ __ pop(rsi);
+ __ pop(rdi);
+ __ pop(r15);
+ __ pop(r14);
+ __ pop(r13);
+ __ pop(r12);
+ __ add(rsp, Immediate(2 * kPointerSize)); // remove markers
+
+ // Restore frame pointer and return.
+ __ pop(rbp);
+ __ ret(0);
+}
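+
+// A sketch of the entry frame built above, derived from the pushes (a
+// reading aid, not authoritative; slot offsets assume 8-byte pointers):
+//
+//   rbp + 0  : saved rbp (caller's frame pointer)
+//   rbp - 8  : NON_SENTINEL marker (not an arguments adaptor frame)
+//   rbp - 16 : Smi-encoded frame type (ENTRY or ENTRY_CONSTRUCT)
+//   rbp - 24 : r12            rbp - 32 : r13
+//   rbp - 40 : r14            rbp - 48 : r15
+//   rbp - 56 : rdi            rbp - 64 : rsi
+//   rbp - 72 : rbx
+//   rbp - 80 : saved c_entry_fp (top frame descriptor)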
+
+
+#undef __
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index 4acb0cb7f..5f5daa422 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -28,7 +28,8 @@
#ifndef V8_X64_CODEGEN_X64_H_
#define V8_X64_CODEGEN_X64_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Forward declarations
class DeferredCode;
@@ -332,8 +333,7 @@ class CodeGenerator: public AstVisitor {
// Accessors
Scope* scope() const { return scope_; }
- // Clearing and generating deferred code.
- void ClearDeferred();
+ // Generating deferred code.
void ProcessDeferred();
bool is_eval() { return is_eval_; }
@@ -473,12 +473,19 @@ class CodeGenerator: public AstVisitor {
void CheckStack();
+ struct InlineRuntimeLUT {
+ void (CodeGenerator::*method)(ZoneList<Expression*>*);
+ const char* name;
+ };
+ static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
bool CheckForInlineRuntimeCall(CallRuntime* node);
+ static bool PatchInlineRuntimeEntry(Handle<String> name,
+ const InlineRuntimeLUT& new_entry,
+ InlineRuntimeLUT* old_entry);
Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
void ProcessDeclarations(ZoneList<Declaration*>* declarations);
- Handle<Code> ComputeCallInitialize(int argc);
- Handle<Code> ComputeCallInitializeInLoop(int argc);
+ Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
// Declare global variables and functions in the given array of
// name/value pairs.
@@ -570,14 +577,14 @@ class CodeGenerator: public AstVisitor {
void CodeForSourcePosition(int pos);
#ifdef DEBUG
- // True if the registers are valid for entry to a block. There should be
- // no frame-external references to eax, ebx, ecx, edx, or edi.
+ // True if the registers are valid for entry to a block. There should
+ // be no frame-external references to (non-reserved) registers.
bool HasValidEntryRegisters();
#endif
bool is_eval_; // Tells whether code is generated for eval.
Handle<Script> script_;
- List<DeferredCode*> deferred_;
+ ZoneList<DeferredCode*> deferred_;
// Assembler
MacroAssembler* masm_; // to generate code
@@ -604,6 +611,8 @@ class CodeGenerator: public AstVisitor {
// in a spilled state.
bool in_spilled_code_;
+ static InlineRuntimeLUT kInlineRuntimeLUT[];
+
friend class VirtualFrame;
friend class JumpTarget;
friend class Reference;
diff --git a/deps/v8/src/x64/cpu-x64.cc b/deps/v8/src/x64/cpu-x64.cc
index 209aa2d30..8df0ab7e9 100644
--- a/deps/v8/src/x64/cpu-x64.cc
+++ b/deps/v8/src/x64/cpu-x64.cc
@@ -25,3 +25,42 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// CPU specific code for x64 independent of OS goes here.
+
+#include "v8.h"
+
+#include "cpu.h"
+#include "macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+void CPU::Setup() {
+ CpuFeatures::Probe();
+}
+
+
+void CPU::FlushICache(void* start, size_t size) {
+ // No need to flush the instruction cache on Intel. On Intel, instruction
+ // cache flushing is only necessary when multiple cores are running the
+ // same code simultaneously. V8 (and JavaScript) is single threaded, and
+ // when code is patched on an Intel CPU the core performing the patching
+ // will have its own instruction cache updated automatically.
+
+ // If flushing of the instruction cache becomes necessary Windows has the
+ // API function FlushInstructionCache.
+}
+
+
+void CPU::DebugBreak() {
+#ifdef _MSC_VER
+ // To avoid depending on the Visual Studio runtime, the following code
+ // could be used instead:
+ // __asm { int 3 }
+ __debugbreak();
+#else
+ asm("int $3");
+#endif
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc
index 209aa2d30..3b101325e 100644
--- a/deps/v8/src/x64/debug-x64.cc
+++ b/deps/v8/src/x64/debug-x64.cc
@@ -25,3 +25,59 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "debug.h"
+
+
+namespace v8 {
+namespace internal {
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+
+bool Debug::IsDebugBreakAtReturn(v8::internal::RelocInfo* rinfo) {
+ UNIMPLEMENTED();
+ return false;
+}
+
+void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED
+}
+
+void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED
+}
+
+void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED
+}
+
+void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED
+}
+
+void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED
+}
+
+void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED
+}
+
+void Debug::GenerateReturnDebugBreakEntry(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED
+}
+
+void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED
+}
+
+void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED
+}
+
+#endif // ENABLE_DEBUGGER_SUPPORT
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 209aa2d30..767b1247a 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -25,3 +25,64 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
+#include "disasm.h"
+
+namespace disasm {
+
+Disassembler::Disassembler(NameConverter const& converter)
+ : converter_(converter) {
+ UNIMPLEMENTED();
+}
+
+
+Disassembler::~Disassembler() {
+ UNIMPLEMENTED();
+}
+
+
+const char* NameConverter::NameOfAddress(unsigned char* addr) const {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+const char* NameConverter::NameOfCPURegister(int reg) const {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+int Disassembler::ConstantPoolSizeAt(unsigned char* addr) {
+ UNIMPLEMENTED();
+ return 0;
+}
+
+
+int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
+ unsigned char* instruction) {
+ UNIMPLEMENTED();
+ return 0;
+}
+
+const char* NameConverter::NameOfByteCPURegister(int a) const {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+const char* NameConverter::NameOfXMMRegister(int a) const {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+const char* NameConverter::NameOfConstant(unsigned char* a) const {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+const char* NameConverter::NameInCode(unsigned char* a) const {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+} // namespace disasm
diff --git a/deps/v8/src/x64/frames-x64.h b/deps/v8/src/x64/frames-x64.h
index f4468f6a2..3416f51de 100644
--- a/deps/v8/src/x64/frames-x64.h
+++ b/deps/v8/src/x64/frames-x64.h
@@ -28,7 +28,8 @@
#ifndef V8_X64_FRAMES_X64_H_
#define V8_X64_FRAMES_X64_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// TODO(x64): This is a stub, mostly just a copy of the ia32 bit version.
// This will all need to change to be correct for x64.
@@ -40,17 +41,17 @@ typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
class StackHandlerConstants : public AllStatic {
public:
- static const int kNextOffset = -1 * kPointerSize;
- static const int kPPOffset = -1 * kPointerSize;
- static const int kFPOffset = -1 * kPointerSize;
+ static const int kNextOffset = 0 * kPointerSize;
+ static const int kPPOffset = 1 * kPointerSize;
+ static const int kFPOffset = 2 * kPointerSize;
- static const int kCodeOffset = -1 * kPointerSize;
+ static const int kCodeOffset = 3 * kPointerSize;
- static const int kStateOffset = -1 * kPointerSize;
- static const int kPCOffset = -1 * kPointerSize;
+ static const int kStateOffset = 4 * kPointerSize;
+ static const int kPCOffset = 5 * kPointerSize;
static const int kAddressDisplacement = -1 * kPointerSize;
- static const int kSize = kPCOffset + kPointerSize;
+ static const int kSize = 6 * kPointerSize;
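+
+ // Taken together, these constants describe the handler layout below,
+ // from the handler base upward (a sketch for orientation only):
+ //   base + 0 * kPointerSize : next handler (kNextOffset)
+ //   base + 1 * kPointerSize : parameter pointer (kPPOffset)
+ //   base + 2 * kPointerSize : frame pointer (kFPOffset)
+ //   base + 3 * kPointerSize : code object (kCodeOffset)
+ //   base + 4 * kPointerSize : state (kStateOffset)
+ //   base + 5 * kPointerSize : return address (kPCOffset)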
};
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index 209aa2d30..71a3a9ab8 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -25,3 +25,152 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "ic-inl.h"
+#include "runtime.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+void KeyedLoadIC::ClearInlinedVersion(Address address) {
+ UNIMPLEMENTED();
+}
+
+void KeyedLoadIC::Generate(MacroAssembler* masm,
+ ExternalReference const& f) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
+ UNIMPLEMENTED();
+ return false;
+}
+
+Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
+ JSObject* object,
+ JSObject* holder,
+ AccessorInfo* callback) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
+ JSObject* object,
+ JSObject* holder,
+ Object* callback) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
+ JSObject* object,
+ JSObject* holder,
+ int index) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* object,
+ JSObject* holder,
+ String* name) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+void KeyedStoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+ int index,
+ Map* transition,
+ String* name) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+void LoadIC::ClearInlinedVersion(Address address) {
+ UNIMPLEMENTED();
+}
+
+void LoadIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void LoadIC::GenerateStringLength(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+bool LoadIC::PatchInlinedLoad(Address address, Object* map, int index) {
+ UNIMPLEMENTED();
+ return false;
+}
+
+void StoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 209aa2d30..54c299dbf 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -25,3 +25,92 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "macro-assembler-x64.h"
+
+namespace v8 {
+namespace internal {
+
+MacroAssembler::MacroAssembler(void* buffer, int size)
+ : Assembler(buffer, size),
+ unresolved_(0),
+ generating_stub_(false),
+ allow_stub_calls_(true),
+ code_object_(Heap::undefined_value()) {
+}
+
+
+void MacroAssembler::TailCallRuntime(ExternalReference const& a, int b) {
+ UNIMPLEMENTED();
+}
+
+
+void MacroAssembler::Set(Register dst, int64_t x) {
+ if (is_int32(x)) {
+ movq(dst, Immediate(x));
+ } else if (is_uint32(x)) {
+ movl(dst, Immediate(x));
+ } else {
+ movq(dst, x, RelocInfo::NONE);
+ }
+}
+
+
+void MacroAssembler::Set(const Operand& dst, int64_t x) {
+ if (is_int32(x)) {
+ movq(kScratchRegister, Immediate(x));
+ } else if (is_uint32(x)) {
+ movl(kScratchRegister, Immediate(x));
+ } else {
+ movq(kScratchRegister, x, RelocInfo::NONE);
+ }
+ movq(dst, kScratchRegister);
+}
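+
+// Examples of the encodings Set chooses (a sketch; the exact instruction
+// bytes are up to the assembler):
+//   Set(rax, -1);                  // fits int32: movq with sign-extended imm32
+//   Set(rax, 0xFFFFFFFF);          // fits uint32: movl zero-extends to 64 bits
+//   Set(rax, 0x123456789ABCDEF0);  // otherwise: full 64-bit movq immediate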
+
+
+void MacroAssembler::PushTryHandler(CodeLocation try_location,
+ HandlerType type) {
+ // The pc (return address) is already on TOS.
+ // This code pushes state, code, frame pointer and parameter pointer.
+ // Check that they are expected next on the stack, in that order.
+ ASSERT_EQ(StackHandlerConstants::kStateOffset,
+ StackHandlerConstants::kPCOffset - kPointerSize);
+ ASSERT_EQ(StackHandlerConstants::kCodeOffset,
+ StackHandlerConstants::kStateOffset - kPointerSize);
+ ASSERT_EQ(StackHandlerConstants::kFPOffset,
+ StackHandlerConstants::kCodeOffset - kPointerSize);
+ ASSERT_EQ(StackHandlerConstants::kPPOffset,
+ StackHandlerConstants::kFPOffset - kPointerSize);
+
+ if (try_location == IN_JAVASCRIPT) {
+ if (type == TRY_CATCH_HANDLER) {
+ push(Immediate(StackHandler::TRY_CATCH));
+ } else {
+ push(Immediate(StackHandler::TRY_FINALLY));
+ }
+ push(Immediate(Smi::FromInt(StackHandler::kCodeNotPresent)));
+ push(rbp);
+ push(rdi);
+ } else {
+ ASSERT(try_location == IN_JS_ENTRY);
+ // The parameter pointer is meaningless here and rbp does not
+ // point to a JS frame. So we save NULL for both pp and rbp. We
+ // expect the code throwing an exception to check rbp before
+ // dereferencing it to restore the context.
+ push(Immediate(StackHandler::ENTRY));
+ push(Immediate(Smi::FromInt(StackHandler::kCodeNotPresent)));
+ push(Immediate(0)); // NULL frame pointer
+ push(Immediate(0)); // NULL parameter pointer
+ }
+ movq(kScratchRegister, ExternalReference(Top::k_handler_address));
+ // Cached TOS.
+ movq(rax, Operand(kScratchRegister, 0));
+ // Link this handler.
+ movq(Operand(kScratchRegister, 0), rsp);
+}
+
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 159d0c4f3..4af372a81 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -30,7 +30,13 @@
#include "assembler.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
+
+// Default scratch register used by MacroAssembler (and other code that needs
+// a spare register). The register is not callee-saved and is not used by
+// the function calling convention.
+static const Register kScratchRegister = r10;
// Forward declaration.
class JumpTarget;
@@ -136,8 +142,8 @@ class MacroAssembler: public Assembler {
void GetBuiltinEntry(Register target, Builtins::JavaScript id);
// Expression support
- void Set(Register dst, const Immediate& x);
- void Set(const Operand& dst, const Immediate& x);
+ void Set(Register dst, int64_t x);
+ void Set(const Operand& dst, int64_t x);
// Compare object type for heap object.
// Incoming register is heap_object and outgoing register is map.
@@ -155,7 +161,7 @@ class MacroAssembler: public Assembler {
// Push a new try handler and link into try handler chain.
// The return address must be pushed before calling this helper.
- // On exit, eax contains TOS (next_sp).
+ // On exit, rax contains TOS (next_sp).
void PushTryHandler(CodeLocation try_location, HandlerType type);
diff --git a/deps/v8/src/x64/register-allocator-x64-inl.h b/deps/v8/src/x64/register-allocator-x64-inl.h
new file mode 100644
index 000000000..f369d7d8d
--- /dev/null
+++ b/deps/v8/src/x64/register-allocator-x64-inl.h
@@ -0,0 +1,69 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_X64_REGISTER_ALLOCATOR_X64_INL_H_
+#define V8_X64_REGISTER_ALLOCATOR_X64_INL_H_
+
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+bool RegisterAllocator::IsReserved(Register reg) {
+ // All registers are reserved for now.
+ return true;
+}
+
+
+// The register allocator uses small integers to represent the
+// non-reserved assembler registers.
+
+int RegisterAllocator::ToNumber(Register reg) {
+ ASSERT(reg.is_valid() && !IsReserved(reg));
+ UNIMPLEMENTED();
+ return -1;
+}
+
+
+Register RegisterAllocator::ToRegister(int num) {
+ ASSERT(num >= 0 && num < kNumRegisters);
+ UNIMPLEMENTED();
+ return no_reg;
+}
+
+
+void RegisterAllocator::Initialize() {
+ UNIMPLEMENTED();
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_X64_REGISTER_ALLOCATOR_X64_INL_H_
diff --git a/deps/v8/src/x64/register-allocator-x64.h b/deps/v8/src/x64/register-allocator-x64.h
new file mode 100644
index 000000000..bc0811247
--- /dev/null
+++ b/deps/v8/src/x64/register-allocator-x64.h
@@ -0,0 +1,45 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_X64_REGISTER_ALLOCATOR_X64_H_
+#define V8_X64_REGISTER_ALLOCATOR_X64_H_
+
+namespace v8 {
+namespace internal {
+
+class RegisterAllocatorConstants : public AllStatic {
+ public:
+ // Register allocation is not yet implemented on x64, but C++
+ // forbids 0-length arrays so we use 1 as the number of registers.
+ static const int kNumRegisters = 1;
+ static const int kInvalidRegister = -1;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_X64_REGISTER_ALLOCATOR_X64_H_
diff --git a/deps/v8/src/x64/virtual-frame-x64.h b/deps/v8/src/x64/virtual-frame-x64.h
index f71766d03..d341a1eee 100644
--- a/deps/v8/src/x64/virtual-frame-x64.h
+++ b/deps/v8/src/x64/virtual-frame-x64.h
@@ -29,8 +29,10 @@
#define V8_X64_VIRTUAL_FRAME_X64_H_
#include "register-allocator.h"
+#include "scopes.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// -------------------------------------------------------------------------
// Virtual frames
@@ -41,7 +43,7 @@ namespace v8 { namespace internal {
// as random access to the expression stack elements, locals, and
// parameters.
-class VirtualFrame : public Malloced {
+class VirtualFrame : public ZoneObject {
public:
// A utility class to introduce a scope where the virtual frame is
// expected to remain spilled. The constructor spills the code
@@ -50,42 +52,66 @@ class VirtualFrame : public Malloced {
// generator is being transformed.
class SpilledScope BASE_EMBEDDED {
public:
- explicit SpilledScope(CodeGenerator* cgen);
+ SpilledScope() : previous_state_(cgen()->in_spilled_code()) {
+ ASSERT(cgen()->has_valid_frame());
+ cgen()->frame()->SpillAll();
+ cgen()->set_in_spilled_code(true);
+ }
- ~SpilledScope();
+ ~SpilledScope() {
+ cgen()->set_in_spilled_code(previous_state_);
+ }
private:
- CodeGenerator* cgen_;
bool previous_state_;
+
+ CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
};
// An illegal index into the virtual frame.
static const int kIllegalIndex = -1;
// Construct an initial virtual frame on entry to a JS function.
- explicit VirtualFrame(CodeGenerator* cgen);
+ VirtualFrame();
// Construct a virtual frame as a clone of an existing one.
explicit VirtualFrame(VirtualFrame* original);
+ CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
+ MacroAssembler* masm() { return cgen()->masm(); }
+
// Create a duplicate of an existing valid frame element.
FrameElement CopyElementAt(int index);
+ // The number of elements on the virtual frame.
+ int element_count() { return elements_.length(); }
+
// The height of the virtual expression stack.
- int height() const {
- return elements_.length() - expression_base_index();
+ int height() {
+ return element_count() - expression_base_index();
+ }
+
+ int register_location(int num) {
+ ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
+ return register_locations_[num];
}
- int register_index(Register reg) {
- return register_locations_[reg.code()];
+ int register_location(Register reg) {
+ return register_locations_[RegisterAllocator::ToNumber(reg)];
}
- bool is_used(int reg_code) {
- return register_locations_[reg_code] != kIllegalIndex;
+ void set_register_location(Register reg, int index) {
+ register_locations_[RegisterAllocator::ToNumber(reg)] = index;
+ }
+
+ bool is_used(int num) {
+ ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
+ return register_locations_[num] != kIllegalIndex;
}
bool is_used(Register reg) {
- return is_used(reg.code());
+ return register_locations_[RegisterAllocator::ToNumber(reg)]
+ != kIllegalIndex;
}
// Add extra in-memory elements to the top of the frame to match an actual
@@ -98,7 +124,12 @@ class VirtualFrame : public Malloced {
// match an external frame effect (examples include a call removing
// its arguments, and exiting a try/catch removing an exception
// handler). No code will be emitted.
- void Forget(int count);
+ void Forget(int count) {
+ ASSERT(count >= 0);
+ ASSERT(stack_pointer_ == element_count() - 1);
+ stack_pointer_ -= count;
+ ForgetElements(count);
+ }
// Forget count elements from the top of the frame without adjusting
// the stack pointer downward. This is used, for example, before
@@ -109,13 +140,25 @@ class VirtualFrame : public Malloced {
void SpillAll();
// Spill all occurrences of a specific register from the frame.
- void Spill(Register reg);
+ void Spill(Register reg) {
+ if (is_used(reg)) SpillElementAt(register_location(reg));
+ }
// Spill all occurrences of an arbitrary register if possible. Return the
// register spilled or no_reg if it was not possible to free any register
// (ie, they all have frame-external references).
Register SpillAnyRegister();
+ // Sync the range of elements in [begin, end] with memory.
+ void SyncRange(int begin, int end);
+
+ // Make this frame such that an arbitrary frame of the same height can
+ // be merged to it. Copies and constants are removed from the
+ // topmost mergable_elements elements of the frame. A
+ // mergable_elements of JumpTarget::kAllElements indicates that
+ // constants and copies should be removed from the entire frame.
+ void MakeMergable(int mergable_elements);
+
// Prepare this virtual frame for merging to an expected frame by
// performing some state changes that do not require generating
// code. It is guaranteed that no code will be generated.
@@ -130,13 +173,23 @@ class VirtualFrame : public Malloced {
// tells the register allocator that it is free to use frame-internal
// registers. Used when the code generator's frame is switched from this
// one to NULL by an unconditional jump.
- void DetachFromCodeGenerator();
+ void DetachFromCodeGenerator() {
+ RegisterAllocator* cgen_allocator = cgen()->allocator();
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ if (is_used(i)) cgen_allocator->Unuse(i);
+ }
+ }
// (Re)attach a frame to its code generator. This informs the register
// allocator that the frame-internal register references are active again.
// Used when a code generator's frame is switched from NULL to this one by
// binding a label.
- void AttachToCodeGenerator();
+ void AttachToCodeGenerator() {
+ RegisterAllocator* cgen_allocator = cgen()->allocator();
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ if (is_used(i)) cgen_allocator->Use(i);
+ }
+ }
// Emit code for the physical JS entry and exit frame sequences. After
// calling Enter, the virtual frame is ready for use; and after calling
@@ -151,7 +204,7 @@ class VirtualFrame : public Malloced {
void PrepareForReturn();
// Allocate and initialize the frame-allocated locals.
- void AllocateStackSlots(int count);
+ void AllocateStackSlots();
// An element of the expression stack as an assembly operand.
Operand ElementAt(int index) const {
@@ -164,22 +217,22 @@ class VirtualFrame : public Malloced {
// Set a frame element to a constant. The index is frame-top relative.
void SetElementAt(int index, Handle<Object> value) {
- Result temp(value, cgen_);
+ Result temp(value);
SetElementAt(index, &temp);
}
void PushElementAt(int index) {
- PushFrameSlotAt(elements_.length() - index - 1);
+ PushFrameSlotAt(element_count() - index - 1);
}
void StoreToElementAt(int index) {
- StoreToFrameSlotAt(elements_.length() - index - 1);
+ StoreToFrameSlotAt(element_count() - index - 1);
}
// A frame-allocated local as an assembly operand.
- Operand LocalAt(int index) const {
+ Operand LocalAt(int index) {
ASSERT(0 <= index);
- ASSERT(index < local_count_);
+ ASSERT(index < local_count());
return Operand(rbp, kLocal0Offset - index * kPointerSize);
}
@@ -215,10 +268,10 @@ class VirtualFrame : public Malloced {
void RestoreContextRegister();
// A parameter as an assembly operand.
- Operand ParameterAt(int index) const {
+ Operand ParameterAt(int index) {
ASSERT(-1 <= index); // -1 is the receiver.
- ASSERT(index < parameter_count_);
- return Operand(rbp, (1 + parameter_count_ - index) * kPointerSize);
+ ASSERT(index < parameter_count());
+ return Operand(rbp, (1 + parameter_count() - index) * kPointerSize);
}
// Push a copy of the value of a parameter frame slot on top of the frame.
@@ -240,14 +293,17 @@ class VirtualFrame : public Malloced {
}
// The receiver frame slot.
- Operand Receiver() const { return ParameterAt(-1); }
+ Operand Receiver() { return ParameterAt(-1); }
// Push a try-catch or try-finally handler on top of the virtual frame.
void PushTryHandler(HandlerType type);
// Call stub given the number of arguments it expects on (and
// removes from) the stack.
- Result CallStub(CodeStub* stub, int arg_count);
+ Result CallStub(CodeStub* stub, int arg_count) {
+ PrepareForCall(arg_count, arg_count);
+ return RawCallStub(stub);
+ }
// Call stub that takes a single argument passed in eax. The
// argument is given as a result which does not have to be eax or
@@ -307,7 +363,7 @@ class VirtualFrame : public Malloced {
void Drop() { Drop(1); }
// Duplicate the top element of the frame.
- void Dup() { PushFrameSlotAt(elements_.length() - 1); }
+ void Dup() { PushFrameSlotAt(element_count() - 1); }
// Pop an element from the top of the expression stack. Returns a
// Result, which may be a constant or a register.
@@ -331,7 +387,15 @@ class VirtualFrame : public Malloced {
// Pushing a result invalidates it (its contents become owned by the
// frame).
- void Push(Result* result);
+ void Push(Result* result) {
+ if (result->is_register()) {
+ Push(result->reg(), result->static_type());
+ } else {
+ ASSERT(result->is_constant());
+ Push(result->handle());
+ }
+ result->Unuse();
+ }
// Nip removes zero or more elements from immediately below the top
// of the frame, leaving the previous top-of-frame value on top of
@@ -346,70 +410,69 @@ class VirtualFrame : public Malloced {
static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
static const int kPreallocatedElements = 5 + 8; // 8 expression stack slots.
- CodeGenerator* cgen_;
- MacroAssembler* masm_;
-
- List<FrameElement> elements_;
-
- // The number of frame-allocated locals and parameters respectively.
- int parameter_count_;
- int local_count_;
+ ZoneList<FrameElement> elements_;
// The index of the element that is at the processor's stack pointer
// (the esp register).
int stack_pointer_;
- // The index of the element that is at the processor's frame pointer
- // (the ebp register).
- int frame_pointer_;
-
// The index of the register frame element using each register, or
// kIllegalIndex if a register is not on the frame.
- int register_locations_[kNumRegisters];
+ int register_locations_[RegisterAllocator::kNumRegisters];
+
+ // The number of frame-allocated locals and parameters respectively.
+ int parameter_count() { return cgen()->scope()->num_parameters(); }
+ int local_count() { return cgen()->scope()->num_stack_slots(); }
+
+ // The index of the element that is at the processor's frame pointer
+ // (the ebp register). The parameters, receiver, and return address
+ // are below the frame pointer.
+ int frame_pointer() { return parameter_count() + 2; }
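+
+ // Example (a sketch): for a function with two parameters the frame
+ // indices are 0: receiver, 1-2: parameters, 3: return address,
+ // 4: frame pointer, 5: context, 6: function, and 7 and up: locals
+ // followed by the expression stack.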
// The index of the first parameter. The receiver lies below the first
// parameter.
- int param0_index() const { return 1; }
+ int param0_index() { return 1; }
- // The index of the context slot in the frame.
- int context_index() const {
- ASSERT(frame_pointer_ != kIllegalIndex);
- return frame_pointer_ + 1;
- }
+ // The index of the context slot in the frame. It is immediately
+ // above the frame pointer.
+ int context_index() { return frame_pointer() + 1; }
- // The index of the function slot in the frame. It lies above the context
- // slot.
- int function_index() const {
- ASSERT(frame_pointer_ != kIllegalIndex);
- return frame_pointer_ + 2;
- }
+ // The index of the function slot in the frame. It is above the frame
+ // pointer and the context slot.
+ int function_index() { return frame_pointer() + 2; }
- // The index of the first local. Between the parameters and the locals
- // lie the return address, the saved frame pointer, the context, and the
- // function.
- int local0_index() const {
- ASSERT(frame_pointer_ != kIllegalIndex);
- return frame_pointer_ + 3;
- }
+ // The index of the first local. Between the frame pointer and the
+ // locals lie the context and the function.
+ int local0_index() { return frame_pointer() + 3; }
// The index of the base of the expression stack.
- int expression_base_index() const { return local0_index() + local_count_; }
+ int expression_base_index() { return local0_index() + local_count(); }
// Convert a frame index into a frame pointer relative offset into the
// actual stack.
- int fp_relative(int index) const {
- return (frame_pointer_ - index) * kPointerSize;
+ int fp_relative(int index) {
+ ASSERT(index < element_count());
+ ASSERT(frame_pointer() < element_count()); // FP is on the frame.
+ return (frame_pointer() - index) * kPointerSize;
}
// Record an occurrence of a register in the virtual frame. This has the
// effect of incrementing the register's external reference count and
// of updating the index of the register's location in the frame.
- void Use(Register reg, int index);
+ void Use(Register reg, int index) {
+ ASSERT(!is_used(reg));
+ set_register_location(reg, index);
+ cgen()->allocator()->Use(reg);
+ }
// Record that a register reference has been dropped from the frame. This
// decrements the register's external reference count and invalidates the
// index of the register's location in the frame.
- void Unuse(Register reg);
+ void Unuse(Register reg) {
+ ASSERT(is_used(reg));
+ set_register_location(reg, kIllegalIndex);
+ cgen()->allocator()->Unuse(reg);
+ }
// Spill the element at a particular index---write it to memory if
// necessary, free any associated register, and forget its value if
@@ -421,9 +484,6 @@ class VirtualFrame : public Malloced {
// Keep the element type as register or constant, and clear the dirty bit.
void SyncElementAt(int index);
- // Sync the range of elements in [begin, end).
- void SyncRange(int begin, int end);
-
// Sync a single unsynced element that lies beneath or at the stack pointer.
void SyncElementBelowStackPointer(int index);
@@ -485,9 +545,12 @@ class VirtualFrame : public Malloced {
bool Equals(VirtualFrame* other);
+ // Classes that need raw access to the elements_ array.
+ friend class DeferredCode;
friend class JumpTarget;
};
+
} } // namespace v8::internal
#endif // V8_X64_VIRTUAL_FRAME_X64_H_
diff --git a/deps/v8/src/zone-inl.h b/deps/v8/src/zone-inl.h
index 69b9a0a21..9af6251bf 100644
--- a/deps/v8/src/zone-inl.h
+++ b/deps/v8/src/zone-inl.h
@@ -31,7 +31,8 @@
#include "zone.h"
#include "v8-counters.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
inline void* Zone::New(int size) {
diff --git a/deps/v8/src/zone.cc b/deps/v8/src/zone.cc
index c8f9c85a2..d78c19b89 100644
--- a/deps/v8/src/zone.cc
+++ b/deps/v8/src/zone.cc
@@ -29,7 +29,8 @@
#include "zone-inl.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
Address Zone::position_ = 0;
diff --git a/deps/v8/src/zone.h b/deps/v8/src/zone.h
index df6915552..a8b26e9fd 100644
--- a/deps/v8/src/zone.h
+++ b/deps/v8/src/zone.h
@@ -28,7 +28,8 @@
#ifndef V8_ZONE_H_
#define V8_ZONE_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Zone scopes are in one of two modes. Either they delete the zone
@@ -180,8 +181,13 @@ class ZoneScope BASE_EMBEDDED {
nesting_++;
}
- ~ZoneScope() {
- if (--nesting_ == 0 && mode_ == DELETE_ON_EXIT) Zone::DeleteAll();
+ virtual ~ZoneScope() {
+ if (ShouldDeleteOnExit()) Zone::DeleteAll();
+ --nesting_;
+ }
+
+ bool ShouldDeleteOnExit() {
+ return nesting_ == 1 && mode_ == DELETE_ON_EXIT;
}
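+
+ // Usage sketch (assuming the constructor takes the mode, as above):
+ // nested scopes only bump nesting_, so the zone is freed when the
+ // outermost scope requesting deletion is destroyed.
+ //   {
+ //     ZoneScope outer(DELETE_ON_EXIT);       // nesting_ == 1
+ //     { ZoneScope inner(DELETE_ON_EXIT); }   // nesting_ == 2: no delete
+ //   }                                        // nesting_ == 1: DeleteAll()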
// For ZoneScopes that do not delete on exit by default, call this
diff --git a/deps/v8/test/cctest/SConscript b/deps/v8/test/cctest/SConscript
index 740acbaa6..7506d2921 100644
--- a/deps/v8/test/cctest/SConscript
+++ b/deps/v8/test/cctest/SConscript
@@ -48,6 +48,7 @@ SOURCES = {
'test-list.cc',
'test-lock.cc',
'test-log.cc',
+ 'test-log-utils.cc',
'test-mark-compact.cc',
'test-regexp.cc',
'test-serialize.cc',
@@ -64,6 +65,7 @@ SOURCES = {
'test-disasm-ia32.cc',
'test-log-ia32.cc'
],
+ 'arch:x64': ['test-assembler-x64.cc'],
'os:linux': ['test-platform-linux.cc'],
'os:macos': ['test-platform-macos.cc'],
'os:nullos': ['test-platform-nullos.cc'],
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 7b43c8de9..a8c218016 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -40,6 +40,15 @@ test-spaces/LargeObjectSpace: PASS || FAIL
# BUG(240): Test seems flaky on ARM.
test-api/RegExpInterruption: SKIP
+# We cannot assume that we can throw OutOfMemory exceptions in all situations.
+# Apparently our ARM box is in such a state. Skip the test as it also runs for
+# a long time.
+test-api/OutOfMemory: SKIP
+test-api/OutOfMemoryNested: SKIP
+
+# BUG(355): Test crashes on ARM.
+test-log/ProfLazyMode: SKIP
+
[ $simulator == arm ]
# BUG(271): During exception propagation, we compare pointers into the
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 4b55b6be0..48157d80b 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -30,6 +30,7 @@
#include "v8.h"
#include "api.h"
+#include "compilation-cache.h"
#include "snapshot.h"
#include "platform.h"
#include "top.h"
@@ -464,6 +465,7 @@ THREADED_TEST(ScriptUsingStringResource) {
v8::internal::Heap::CollectAllGarbage();
CHECK_EQ(0, TestResource::dispose_count);
}
+ v8::internal::CompilationCache::Clear();
v8::internal::Heap::CollectAllGarbage();
CHECK_EQ(1, TestResource::dispose_count);
}
@@ -484,6 +486,7 @@ THREADED_TEST(ScriptUsingAsciiStringResource) {
v8::internal::Heap::CollectAllGarbage();
CHECK_EQ(0, TestAsciiResource::dispose_count);
}
+ v8::internal::CompilationCache::Clear();
v8::internal::Heap::CollectAllGarbage();
CHECK_EQ(1, TestAsciiResource::dispose_count);
}
@@ -505,6 +508,7 @@ THREADED_TEST(ScriptMakingExternalString) {
v8::internal::Heap::CollectAllGarbage();
CHECK_EQ(0, TestResource::dispose_count);
}
+ v8::internal::CompilationCache::Clear();
v8::internal::Heap::CollectAllGarbage();
CHECK_EQ(1, TestResource::dispose_count);
}
@@ -527,35 +531,43 @@ THREADED_TEST(ScriptMakingExternalAsciiString) {
v8::internal::Heap::CollectAllGarbage();
CHECK_EQ(0, TestAsciiResource::dispose_count);
}
+ v8::internal::CompilationCache::Clear();
v8::internal::Heap::CollectAllGarbage();
CHECK_EQ(1, TestAsciiResource::dispose_count);
}
THREADED_TEST(UsingExternalString) {
- v8::HandleScope scope;
- uint16_t* two_byte_string = AsciiToTwoByteString("test string");
- Local<String> string = String::NewExternal(new TestResource(two_byte_string));
- i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
- // Trigger GCs so that the newly allocated string moves to old gen.
- i::Heap::CollectGarbage(0, i::NEW_SPACE); // in survivor space now
- i::Heap::CollectGarbage(0, i::NEW_SPACE); // in old gen now
- i::Handle<i::String> isymbol = i::Factory::SymbolFromString(istring);
- CHECK(isymbol->IsSymbol());
+ {
+ v8::HandleScope scope;
+ uint16_t* two_byte_string = AsciiToTwoByteString("test string");
+ Local<String> string =
+ String::NewExternal(new TestResource(two_byte_string));
+ i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
+ // Trigger GCs so that the newly allocated string moves to old gen.
+ i::Heap::CollectGarbage(0, i::NEW_SPACE); // in survivor space now
+ i::Heap::CollectGarbage(0, i::NEW_SPACE); // in old gen now
+ i::Handle<i::String> isymbol = i::Factory::SymbolFromString(istring);
+ CHECK(isymbol->IsSymbol());
+ }
+ i::Heap::CollectAllGarbage();
}
THREADED_TEST(UsingExternalAsciiString) {
- v8::HandleScope scope;
- const char* one_byte_string = "test string";
- Local<String> string = String::NewExternal(
- new TestAsciiResource(i::StrDup(one_byte_string)));
- i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
- // Trigger GCs so that the newly allocated string moves to old gen.
- i::Heap::CollectGarbage(0, i::NEW_SPACE); // in survivor space now
- i::Heap::CollectGarbage(0, i::NEW_SPACE); // in old gen now
- i::Handle<i::String> isymbol = i::Factory::SymbolFromString(istring);
- CHECK(isymbol->IsSymbol());
+ {
+ v8::HandleScope scope;
+ const char* one_byte_string = "test string";
+ Local<String> string = String::NewExternal(
+ new TestAsciiResource(i::StrDup(one_byte_string)));
+ i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
+ // Trigger GCs so that the newly allocated string moves to old gen.
+ i::Heap::CollectGarbage(0, i::NEW_SPACE); // in survivor space now
+ i::Heap::CollectGarbage(0, i::NEW_SPACE); // in old gen now
+ i::Handle<i::String> isymbol = i::Factory::SymbolFromString(istring);
+ CHECK(isymbol->IsSymbol());
+ }
+ i::Heap::CollectAllGarbage();
}
@@ -1327,6 +1339,38 @@ THREADED_TEST(HiddenProperties) {
}
+static v8::Handle<Value> InterceptorForHiddenProperties(
+ Local<String> name, const AccessorInfo& info) {
+ // Make sure objects move.
+ bool saved_always_compact = i::FLAG_always_compact;
+ if (!i::FLAG_never_compact) {
+ i::FLAG_always_compact = true;
+ }
+ // The whole goal of this interceptor is to cause a GC during local property
+ // lookup.
+ i::Heap::CollectAllGarbage();
+ i::FLAG_always_compact = saved_always_compact;
+ return v8::Handle<Value>();
+}
+
+
+THREADED_TEST(HiddenPropertiesWithInterceptors) {
+ v8::HandleScope scope;
+ LocalContext context;
+
+ v8::Local<v8::String> key = v8_str("api-test::hidden-key");
+
+ // Associate an interceptor with an object and start setting hidden values.
+ Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
+ Local<v8::ObjectTemplate> instance_templ = fun_templ->InstanceTemplate();
+ instance_templ->SetNamedPropertyHandler(InterceptorForHiddenProperties);
+ Local<v8::Function> function = fun_templ->GetFunction();
+ Local<v8::Object> obj = function->NewInstance();
+ CHECK(obj->SetHiddenValue(key, v8::Integer::New(2302)));
+ CHECK_EQ(2302, obj->GetHiddenValue(key)->Int32Value());
+}
+
+
THREADED_TEST(External) {
v8::HandleScope scope;
int x = 3;
@@ -4471,10 +4515,6 @@ THREADED_TEST(EvalAliasedDynamic) {
v8::HandleScope scope;
LocalContext current;
- // This sets 'global' to the real global object (as opposed to the
- // proxy). It is highly implementation dependent, so take care.
- current->Global()->Set(v8_str("global"), current->Global()->GetPrototype());
-
// Tests where aliased eval can only be resolved dynamically.
Local<Script> script =
Script::Compile(v8_str("function f(x) { "
@@ -4483,7 +4523,7 @@ THREADED_TEST(EvalAliasedDynamic) {
"}"
"foo = 0;"
"result1 = f(new Object());"
- "result2 = f(global);"
+ "result2 = f(this);"
"var x = new Object();"
"x.eval = function(x) { return 1; };"
"result3 = f(x);"));
@@ -4498,7 +4538,7 @@ THREADED_TEST(EvalAliasedDynamic) {
" var bar = 2;"
" with (x) { return eval('bar'); }"
"}"
- "f(global)"));
+ "f(this)"));
script->Run();
CHECK(try_catch.HasCaught());
try_catch.Reset();
@@ -4585,6 +4625,44 @@ THREADED_TEST(CrossEval) {
}
+// Test that calling eval in a context which has been detached from
+// its global throws an exception. This behavior is consistent with
+// other JavaScript implementations.
+THREADED_TEST(EvalInDetachedGlobal) {
+ v8::HandleScope scope;
+
+ v8::Persistent<Context> context0 = Context::New();
+ v8::Persistent<Context> context1 = Context::New();
+
+ // Set up a function in context0 that uses eval from context0.
+ context0->Enter();
+ v8::Handle<v8::Value> fun =
+ CompileRun("var x = 42;"
+ "(function() {"
+ " var e = eval;"
+ " return function(s) { return e(s); }"
+ "})()");
+ context0->Exit();
+
+ // Put the function into context1 and call it before and after
+ // detaching the global. Before detaching, the call succeeds and
+ // after detaching an exception is thrown.
+ context1->Enter();
+ context1->Global()->Set(v8_str("fun"), fun);
+ v8::Handle<v8::Value> x_value = CompileRun("fun('x')");
+ CHECK_EQ(42, x_value->Int32Value());
+ context0->DetachGlobal();
+ v8::TryCatch catcher;
+ x_value = CompileRun("fun('x')");
+ CHECK(x_value.IsEmpty());
+ CHECK(catcher.HasCaught());
+ context1->Exit();
+
+ context1.Dispose();
+ context0.Dispose();
+}
+
+
THREADED_TEST(CrossLazyLoad) {
v8::HandleScope scope;
LocalContext other;
@@ -4607,6 +4685,12 @@ THREADED_TEST(CrossLazyLoad) {
static v8::Handle<Value> call_as_function(const v8::Arguments& args) {
ApiTestFuzzer::Fuzz();
+ if (args.IsConstructCall()) {
+ if (args[0]->IsInt32()) {
+ return v8_num(-args[0]->Int32Value());
+ }
+ }
+
return args[0];
}
@@ -4660,9 +4744,9 @@ THREADED_TEST(CallAsFunction) {
// Check that the call-as-function handler can be called through
// new. Currently, there is no way to check in the call-as-function
// handler if it has been called through new or not.
- value = CompileRun("new obj(42)");
+ value = CompileRun("new obj(43)");
CHECK(!try_catch.HasCaught());
- CHECK_EQ(42, value->Int32Value());
+ CHECK_EQ(-43, value->Int32Value());
}
@@ -4777,6 +4861,23 @@ THREADED_TEST(InterceptorHasOwnPropertyCausingGC) {
}
+typedef v8::Handle<Value> (*NamedPropertyGetter)(Local<String> property,
+ const AccessorInfo& info);
+
+
+static void CheckInterceptorLoadIC(NamedPropertyGetter getter,
+ const char* source,
+ int expected) {
+ v8::HandleScope scope;
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+ templ->SetNamedPropertyHandler(getter);
+ LocalContext context;
+ context->Global()->Set(v8_str("o"), templ->NewInstance());
+ v8::Handle<Value> value = CompileRun(source);
+ CHECK_EQ(expected, value->Int32Value());
+}
+
+
static v8::Handle<Value> InterceptorLoadICGetter(Local<String> name,
const AccessorInfo& info) {
ApiTestFuzzer::Fuzz();
@@ -4787,17 +4888,100 @@ static v8::Handle<Value> InterceptorLoadICGetter(Local<String> name,
// This test should hit the load IC for the interceptor case.
THREADED_TEST(InterceptorLoadIC) {
- v8::HandleScope scope;
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
- templ->SetNamedPropertyHandler(InterceptorLoadICGetter);
- LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
- v8::Handle<Value> value = CompileRun(
+ CheckInterceptorLoadIC(InterceptorLoadICGetter,
"var result = 0;"
"for (var i = 0; i < 1000; i++) {"
" result = o.x;"
- "}");
- CHECK_EQ(42, value->Int32Value());
+ "}",
+ 42);
+}
+
+
+// The tests below verify that JITing for various configurations of
+// interceptor and explicit fields works correctly (those cases are
+// special-cased to get better performance).
+
+static v8::Handle<Value> InterceptorLoadXICGetter(Local<String> name,
+ const AccessorInfo& info) {
+ ApiTestFuzzer::Fuzz();
+ return v8_str("x")->Equals(name)
+ ? v8::Integer::New(42) : v8::Handle<v8::Value>();
+}
+
+
+THREADED_TEST(InterceptorLoadICWithFieldOnHolder) {
+ CheckInterceptorLoadIC(InterceptorLoadXICGetter,
+ "var result = 0;"
+ "o.y = 239;"
+ "for (var i = 0; i < 1000; i++) {"
+ " result = o.y;"
+ "}",
+ 239);
+}
+
+
+THREADED_TEST(InterceptorLoadICWithSubstitutedProto) {
+ CheckInterceptorLoadIC(InterceptorLoadXICGetter,
+ "var result = 0;"
+ "o.__proto__ = { 'y': 239 };"
+ "for (var i = 0; i < 1000; i++) {"
+ " result = o.y + o.x;"
+ "}",
+ 239 + 42);
+}
+
+
+THREADED_TEST(InterceptorLoadICWithPropertyOnProto) {
+ CheckInterceptorLoadIC(InterceptorLoadXICGetter,
+ "var result = 0;"
+ "o.__proto__.y = 239;"
+ "for (var i = 0; i < 1000; i++) {"
+ " result = o.y + o.x;"
+ "}",
+ 239 + 42);
+}
+
+
+THREADED_TEST(InterceptorLoadICUndefined) {
+ CheckInterceptorLoadIC(InterceptorLoadXICGetter,
+ "var result = 0;"
+ "for (var i = 0; i < 1000; i++) {"
+ " result = (o.y == undefined) ? 239 : 42;"
+ "}",
+ 239);
+}
+
+
+THREADED_TEST(InterceptorLoadICWithOverride) {
+ CheckInterceptorLoadIC(InterceptorLoadXICGetter,
+ "fst = new Object(); fst.__proto__ = o;"
+ "snd = new Object(); snd.__proto__ = fst;"
+ "var result1 = 0;"
+ "for (var i = 0; i < 1000; i++) {"
+ " result1 = snd.x;"
+ "}"
+ "fst.x = 239;"
+ "var result = 0;"
+ "for (var i = 0; i < 1000; i++) {"
+ " result = snd.x;"
+ "}"
+ "result + result1",
+ 239 + 42);
+}
+
+
+static v8::Handle<Value> InterceptorLoadICGetter0(Local<String> name,
+ const AccessorInfo& info) {
+ ApiTestFuzzer::Fuzz();
+ CHECK(v8_str("x")->Equals(name));
+ return v8::Integer::New(0);
+}
+
+
+THREADED_TEST(InterceptorReturningZero) {
+ CheckInterceptorLoadIC(InterceptorLoadICGetter0,
+ "o.x == undefined ? 1 : 0",
+ 0);
}
@@ -6571,3 +6755,135 @@ TEST(ForceSetWithInterceptor) {
CHECK_EQ(1, force_set_set_count);
CHECK_EQ(6, force_set_get_count);
}
+
+
+THREADED_TEST(ForceDelete) {
+ v8::HandleScope scope;
+ v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New();
+ LocalContext context(NULL, templ);
+ v8::Handle<v8::Object> global = context->Global();
+
+ // Ordinary properties
+ v8::Handle<v8::String> simple_property = v8::String::New("p");
+ global->Set(simple_property, v8::Int32::New(4), v8::DontDelete);
+ CHECK_EQ(4, global->Get(simple_property)->Int32Value());
+ // This should fail because the property is dont-delete.
+ CHECK(!global->Delete(simple_property));
+ CHECK_EQ(4, global->Get(simple_property)->Int32Value());
+ // This should succeed even though the property is dont-delete.
+ CHECK(global->ForceDelete(simple_property));
+ CHECK(global->Get(simple_property)->IsUndefined());
+}
+
+
+static int force_delete_interceptor_count = 0;
+static bool pass_on_delete = false;
+
+
+static v8::Handle<v8::Boolean> ForceDeleteDeleter(
+ v8::Local<v8::String> name,
+ const v8::AccessorInfo& info) {
+ force_delete_interceptor_count++;
+ if (pass_on_delete) {
+ return v8::Handle<v8::Boolean>();
+ } else {
+ return v8::True();
+ }
+}
+
+
+THREADED_TEST(ForceDeleteWithInterceptor) {
+ force_delete_interceptor_count = 0;
+ pass_on_delete = false;
+
+ v8::HandleScope scope;
+ v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New();
+ templ->SetNamedPropertyHandler(0, 0, 0, ForceDeleteDeleter);
+ LocalContext context(NULL, templ);
+ v8::Handle<v8::Object> global = context->Global();
+
+ v8::Handle<v8::String> some_property = v8::String::New("a");
+ global->Set(some_property, v8::Integer::New(42), v8::DontDelete);
+
+ // Deleting a property should get intercepted and nothing should
+ // happen.
+ CHECK_EQ(0, force_delete_interceptor_count);
+ CHECK(global->Delete(some_property));
+ CHECK_EQ(1, force_delete_interceptor_count);
+ CHECK_EQ(42, global->Get(some_property)->Int32Value());
+ // Deleting the property when the interceptor returns an empty
+ // handle should not delete the property since it is DontDelete.
+ pass_on_delete = true;
+ CHECK(!global->Delete(some_property));
+ CHECK_EQ(2, force_delete_interceptor_count);
+ CHECK_EQ(42, global->Get(some_property)->Int32Value());
+ // Forcing the property to be deleted should delete the value
+ // without calling the interceptor.
+ CHECK(global->ForceDelete(some_property));
+ CHECK(global->Get(some_property)->IsUndefined());
+ CHECK_EQ(2, force_delete_interceptor_count);
+}
+
+
+v8::Persistent<Context> calling_context0;
+v8::Persistent<Context> calling_context1;
+v8::Persistent<Context> calling_context2;
+
+
+// Check that the call to the callback is initiated in
+// calling_context2, the directly calling context is calling_context1
+// and the callback itself is in calling_context0.
+static v8::Handle<Value> GetCallingContextCallback(const v8::Arguments& args) {
+ ApiTestFuzzer::Fuzz();
+ CHECK(Context::GetCurrent() == calling_context0);
+ CHECK(Context::GetCalling() == calling_context1);
+ CHECK(Context::GetEntered() == calling_context2);
+ return v8::Integer::New(42);
+}
+
+
+THREADED_TEST(GetCallingContext) {
+ v8::HandleScope scope;
+
+ calling_context0 = Context::New();
+ calling_context1 = Context::New();
+ calling_context2 = Context::New();
+
+ // Allow cross-domain access.
+ Local<String> token = v8_str("<security token>");
+ calling_context0->SetSecurityToken(token);
+ calling_context1->SetSecurityToken(token);
+ calling_context2->SetSecurityToken(token);
+
+ // Create an object with a C++ callback in context0.
+ calling_context0->Enter();
+ Local<v8::FunctionTemplate> callback_templ =
+ v8::FunctionTemplate::New(GetCallingContextCallback);
+ calling_context0->Global()->Set(v8_str("callback"),
+ callback_templ->GetFunction());
+ calling_context0->Exit();
+
+ // Expose context0 in context1 and set up a function that calls the
+ // callback function.
+ calling_context1->Enter();
+ calling_context1->Global()->Set(v8_str("context0"),
+ calling_context0->Global());
+ CompileRun("function f() { context0.callback() }");
+ calling_context1->Exit();
+
+ // Expose context1 in context2 and call the callback function in
+ // context0 indirectly through f in context1.
+ calling_context2->Enter();
+ calling_context2->Global()->Set(v8_str("context1"),
+ calling_context1->Global());
+ CompileRun("context1.f()");
+ calling_context2->Exit();
+
+ // Dispose the contexts to allow them to be garbage collected.
+ calling_context0.Dispose();
+ calling_context1.Dispose();
+ calling_context2.Dispose();
+ calling_context0.Clear();
+ calling_context1.Clear();
+ calling_context2.Clear();
+}
diff --git a/deps/v8/test/cctest/test-assembler-x64.cc b/deps/v8/test/cctest/test-assembler-x64.cc
new file mode 100644
index 000000000..43ba4e913
--- /dev/null
+++ b/deps/v8/test/cctest/test-assembler-x64.cc
@@ -0,0 +1,251 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "macro-assembler.h"
+#include "factory.h"
+#include "platform.h"
+#include "serialize.h"
+#include "cctest.h"
+
+using v8::internal::byte;
+using v8::internal::OS;
+using v8::internal::Assembler;
+using v8::internal::Operand;
+using v8::internal::Immediate;
+using v8::internal::Label;
+using v8::internal::rax;
+using v8::internal::rsi;
+using v8::internal::rdi;
+using v8::internal::rbp;
+using v8::internal::rsp;
+using v8::internal::FUNCTION_CAST;
+using v8::internal::CodeDesc;
+using v8::internal::less_equal;
+using v8::internal::not_equal;
+using v8::internal::greater;
+
+
+// Test the x64 assembler by compiling some simple functions into
+// a buffer and executing them. These tests do not initialize the
+// V8 library, create a context, or use any V8 objects.
+// The AMD64 (System V) calling convention is used, with the first six
+// integer arguments in RDI, RSI, RDX, RCX, R8, and R9, and floating point
+// arguments in the XMM registers. The return value is in RAX.
+// This calling convention is used on Linux and Mac OS with GCC. A
+// different convention is used on 64-bit Windows.
+
+typedef int (*F0)();
+typedef int (*F1)(int x);
+typedef int (*F2)(int x, int y);
+
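+// Illustration (informal note, mirroring the tests below): calling a
+// generated F2 as FUNCTION_CAST<F2>(buffer)(3, 2) places x = 3 in RDI and
+// y = 2 in RSI, so a body of movq(rax, rsi); ret(0); returns 2.
+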
+#define __ assm.
+
+
+TEST(AssemblerX64ReturnOperation) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
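+  // (The final 'true' argument asks OS::Allocate for executable memory.)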
+ CHECK(buffer);
+ Assembler assm(buffer, actual_size);
+
+ // Assemble a simple function that copies argument 2 and returns it.
+ __ movq(rax, rsi);
+ __ nop();
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F2>(buffer)(3, 2);
+ CHECK_EQ(2, result);
+}
+
+TEST(AssemblerX64StackOperations) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ Assembler assm(buffer, actual_size);
+
+  // Assemble a simple function that pushes and pops values on the stack
+  // and ends up returning argument 2. The test binary is compiled without
+  // stack frame pointers, so gdb shows incorrect stack frames when
+  // debugging this function (which does set one up).
+ __ push(rbp);
+ __ movq(rbp, rsp);
+ __ push(rsi); // Value at (rbp - 8)
+ __ push(rsi); // Value at (rbp - 16)
+ __ push(rdi); // Value at (rbp - 24)
+ __ pop(rax);
+ __ pop(rax);
+ __ pop(rax);
+ __ pop(rbp);
+ __ nop();
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F2>(buffer)(3, 2);
+ CHECK_EQ(2, result);
+}
+
+TEST(AssemblerX64ArithmeticOperations) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ Assembler assm(buffer, actual_size);
+
+  // Assemble a simple function that adds its two arguments and returns
+  // the sum.
+ __ movq(rax, rsi);
+ __ add(rax, rdi);
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F2>(buffer)(3, 2);
+ CHECK_EQ(5, result);
+}
+
+TEST(AssemblerX64MemoryOperands) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ Assembler assm(buffer, actual_size);
+
+  // Assemble a simple function that reads argument 1 back from the stack
+  // through a memory operand and returns it.
+ __ push(rbp);
+ __ movq(rbp, rsp);
+ __ push(rsi); // Value at (rbp - 8)
+ __ push(rsi); // Value at (rbp - 16)
+ __ push(rdi); // Value at (rbp - 24)
+ const int kStackElementSize = 8;
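+  // Each 64-bit push moves rsp down by kStackElementSize bytes, so the
+  // value pushed third (rdi, argument 1) sits at rbp - 3 * kStackElementSize.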
+ __ movq(rax, Operand(rbp, -3 * kStackElementSize));
+ __ pop(rsi);
+ __ pop(rsi);
+ __ pop(rsi);
+ __ pop(rbp);
+ __ nop();
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F2>(buffer)(3, 2);
+ CHECK_EQ(3, result);
+}
+
+TEST(AssemblerX64ControlFlow) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ Assembler assm(buffer, actual_size);
+
+  // Assemble a simple function that returns argument 1, jumping over the
+  // code that would overwrite it with argument 2.
+ __ push(rbp);
+ __ movq(rbp, rsp);
+ __ movq(rax, rdi);
+ Label target;
+ __ jmp(&target);
+ __ movq(rax, rsi);
+ __ bind(&target);
+ __ pop(rbp);
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F2>(buffer)(3, 2);
+ CHECK_EQ(3, result);
+}
+
+TEST(AssemblerX64LoopImmediates) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ Assembler assm(buffer, actual_size);
+ // Assemble two loops using rax as counter, and verify the ending counts.
+ Label Fail;
+ __ movq(rax, Immediate(-3));
+ Label Loop1_test;
+ Label Loop1_body;
+ __ jmp(&Loop1_test);
+ __ bind(&Loop1_body);
+ __ add(rax, Immediate(7));
+ __ bind(&Loop1_test);
+ __ cmp(rax, Immediate(20));
+ __ j(less_equal, &Loop1_body);
+ // Did the loop terminate with the expected value?
+ __ cmp(rax, Immediate(25));
+ __ j(not_equal, &Fail);
+
+ Label Loop2_test;
+ Label Loop2_body;
+ __ movq(rax, Immediate(0x11FEED00));
+ __ jmp(&Loop2_test);
+ __ bind(&Loop2_body);
+ __ add(rax, Immediate(-0x1100));
+ __ bind(&Loop2_test);
+ __ cmp(rax, Immediate(0x11FE8000));
+ __ j(greater, &Loop2_body);
+ // Did the loop terminate with the expected value?
+ __ cmp(rax, Immediate(0x11FE7600));
+ __ j(not_equal, &Fail);
+
+ __ movq(rax, Immediate(1));
+ __ ret(0);
+ __ bind(&Fail);
+ __ movq(rax, Immediate(0));
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F0>(buffer)();
+ CHECK_EQ(1, result);
+}
+#undef __
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 288efbaed..92f48e1cf 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -30,6 +30,7 @@
#include "v8.h"
#include "api.h"
+#include "compilation-cache.h"
#include "debug.h"
#include "platform.h"
#include "stub-cache.h"
@@ -370,7 +371,8 @@ static void PrepareStep(StepAction step_action) {
// This function is in namespace v8::internal to be friend with class
// v8::internal::Debug.
-namespace v8 { namespace internal { // NOLINT
+namespace v8 {
+namespace internal {
// Collect the currently debugged functions.
Handle<FixedArray> GetDebuggedFunctions() {
@@ -3311,6 +3313,82 @@ TEST(HiddenPrototypePropertyMirror) {
}
+static v8::Handle<v8::Value> PropertyXNativeGetter(
+ v8::Local<v8::String> property, const v8::AccessorInfo& info) {
+ return v8::Integer::New(10);
+}
+
+
+TEST(NativeGetterPropertyMirror) {
+ // Create a V8 environment with debug access.
+ v8::HandleScope scope;
+ DebugLocalContext env;
+ env.ExposeDebug();
+
+ v8::Handle<v8::String> name = v8::String::New("x");
+ // Create object with named accessor.
+ v8::Handle<v8::ObjectTemplate> named = v8::ObjectTemplate::New();
+  named->SetAccessor(name, &PropertyXNativeGetter, NULL,
+ v8::Handle<v8::Value>(), v8::DEFAULT, v8::None);
+
+ // Create object with named property getter.
+ env->Global()->Set(v8::String::New("instance"), named->NewInstance());
+ CHECK_EQ(10, CompileRun("instance.x")->Int32Value());
+
+ // Get mirror for the object with property getter.
+ CompileRun("instance_mirror = debug.MakeMirror(instance);");
+ CHECK(CompileRun(
+ "instance_mirror instanceof debug.ObjectMirror")->BooleanValue());
+
+ CompileRun("named_names = instance_mirror.propertyNames();");
+ CHECK_EQ(1, CompileRun("named_names.length")->Int32Value());
+ CHECK(CompileRun("named_names[0] == 'x'")->BooleanValue());
+ CHECK(CompileRun(
+ "instance_mirror.property('x').value().isNumber()")->BooleanValue());
+ CHECK(CompileRun(
+ "instance_mirror.property('x').value().value() == 10")->BooleanValue());
+}
+
+
+static v8::Handle<v8::Value> PropertyXNativeGetterThrowingError(
+ v8::Local<v8::String> property, const v8::AccessorInfo& info) {
+ return CompileRun("throw new Error('Error message');");
+}
+
+
+TEST(NativeGetterThrowingErrorPropertyMirror) {
+ // Create a V8 environment with debug access.
+ v8::HandleScope scope;
+ DebugLocalContext env;
+ env.ExposeDebug();
+
+ v8::Handle<v8::String> name = v8::String::New("x");
+ // Create object with named accessor.
+ v8::Handle<v8::ObjectTemplate> named = v8::ObjectTemplate::New();
+  named->SetAccessor(name, &PropertyXNativeGetterThrowingError, NULL,
+ v8::Handle<v8::Value>(), v8::DEFAULT, v8::None);
+
+ // Create object with named property getter.
+ env->Global()->Set(v8::String::New("instance"), named->NewInstance());
+
+ // Get mirror for the object with property getter.
+ CompileRun("instance_mirror = debug.MakeMirror(instance);");
+ CHECK(CompileRun(
+ "instance_mirror instanceof debug.ObjectMirror")->BooleanValue());
+ CompileRun("named_names = instance_mirror.propertyNames();");
+ CHECK_EQ(1, CompileRun("named_names.length")->Int32Value());
+ CHECK(CompileRun("named_names[0] == 'x'")->BooleanValue());
+ CHECK(CompileRun(
+ "instance_mirror.property('x').value().isError()")->BooleanValue());
+
+  // Check that the message is the one passed to the Error constructor.
+ CHECK(CompileRun(
+ "instance_mirror.property('x').value().message() == 'Error message'")->
+ BooleanValue());
+}
+
+
// Multithreaded tests of JSON debugger protocol
// Support classes
@@ -4166,11 +4244,8 @@ TEST(DebuggerUnload) {
}
-// Debugger message handler which counts the number of times it is called.
-static int message_handler_hit_count = 0;
-static void MessageHandlerHitCount(const v8::Debug::Message& message) {
- message_handler_hit_count++;
-
+// Sends a continue command to the debugger.
+static void SendContinueCommand() {
const int kBufferSize = 1000;
uint16_t buffer[kBufferSize];
const char* command_continue =
@@ -4182,6 +4257,15 @@ static void MessageHandlerHitCount(const v8::Debug::Message& message) {
}
+// Debugger message handler which counts the number of times it is called.
+static int message_handler_hit_count = 0;
+static void MessageHandlerHitCount(const v8::Debug::Message& message) {
+ message_handler_hit_count++;
+
+ SendContinueCommand();
+}
+
+
// Test clearing the debug message handler.
TEST(DebuggerClearMessageHandler) {
v8::HandleScope scope;
@@ -4525,6 +4609,8 @@ class EmptyExternalStringResource : public v8::String::ExternalStringResource {
TEST(DebugGetLoadedScripts) {
v8::HandleScope scope;
DebugLocalContext env;
+ env.ExposeDebug();
+
EmptyExternalStringResource source_ext_str;
v8::Local<v8::String> source = v8::String::NewExternal(&source_ext_str);
v8::Handle<v8::Script> evil_script = v8::Script::Compile(source);
@@ -4538,11 +4624,15 @@ TEST(DebugGetLoadedScripts) {
i::FLAG_allow_natives_syntax = true;
CompileRun(
"var scripts = %DebugGetLoadedScripts();"
- "for (var i = 0; i < scripts.length; ++i) {"
- " scripts[i].line_ends;"
+ "var count = scripts.length;"
+ "for (var i = 0; i < count; ++i) {"
+ " scripts[i].line_ends;"
"}");
// Must not crash while accessing line_ends.
i::FLAG_allow_natives_syntax = allow_natives_syntax;
+
+  // Some scripts are retrieved - at least the native scripts (more than 8).
+ CHECK_GT((*env)->Global()->Get(v8::String::New("count"))->Int32Value(), 8);
}
@@ -4574,7 +4664,6 @@ TEST(ScriptNameAndData) {
v8::Handle<v8::Script> script1 = v8::Script::Compile(script, &origin1);
script1->SetData(v8::String::New("data"));
script1->Run();
- v8::Script::Compile(script, &origin1)->Run();
v8::Local<v8::Function> f;
f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
@@ -4583,6 +4672,15 @@ TEST(ScriptNameAndData) {
CHECK_EQ("name", last_script_name_hit);
CHECK_EQ("data", last_script_data_hit);
+  // Compile the same script again without setting data. As the compilation
+  // cache is disabled when debugging, expect the data to be missing.
+ v8::Script::Compile(script, &origin1)->Run();
+ f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+ f->Call(env->Global(), 0, NULL);
+ CHECK_EQ(2, break_point_hit_count);
+ CHECK_EQ("name", last_script_name_hit);
+ CHECK_EQ("", last_script_data_hit); // Undefined results in empty string.
+
v8::Local<v8::String> data_obj_source = v8::String::New(
"({ a: 'abc',\n"
" b: 123,\n"
@@ -4595,7 +4693,7 @@ TEST(ScriptNameAndData) {
script2->SetData(data_obj);
f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
f->Call(env->Global(), 0, NULL);
- CHECK_EQ(2, break_point_hit_count);
+ CHECK_EQ(3, break_point_hit_count);
CHECK_EQ("new name", last_script_name_hit);
CHECK_EQ("abc 123", last_script_data_hit);
}
@@ -4612,16 +4710,9 @@ static void ContextCheckMessageHandler(const v8::Debug::Message& message) {
expected_context_data));
message_handler_hit_count++;
- const int kBufferSize = 1000;
- uint16_t buffer[kBufferSize];
- const char* command_continue =
- "{\"seq\":0,"
- "\"type\":\"request\","
- "\"command\":\"continue\"}";
-
// Send a continue command for break events.
if (message.GetEvent() == v8::Break) {
- v8::Debug::SendCommand(buffer, AsciiToUtf16(command_continue, buffer));
+ SendContinueCommand();
}
}
@@ -4679,4 +4770,418 @@ TEST(ContextData) {
// Two times compile event and two times break event.
CHECK_GT(message_handler_hit_count, 4);
+
+ v8::Debug::SetMessageHandler2(NULL);
+ CheckDebuggerUnloaded();
+}
+
+
+// Debug message handler which issues a debug break when it hits a break event.
+static int message_handler_break_hit_count = 0;
+static void DebugBreakMessageHandler(const v8::Debug::Message& message) {
+ // Schedule a debug break for break events.
+ if (message.IsEvent() && message.GetEvent() == v8::Break) {
+ message_handler_break_hit_count++;
+ if (message_handler_break_hit_count == 1) {
+ v8::Debug::DebugBreak();
+ }
+ }
+
+ // Issue a continue command if this event will not cause the VM to start
+ // running.
+ if (!message.WillStartRunning()) {
+ SendContinueCommand();
+ }
+}
+
+
+// Test that a debug break can be scheduled while in a message handler.
+TEST(DebugBreakInMessageHandler) {
+ v8::HandleScope scope;
+ DebugLocalContext env;
+
+ v8::Debug::SetMessageHandler2(DebugBreakMessageHandler);
+
+ // Test functions.
+ const char* script = "function f() { debugger; } function g() { }";
+ CompileRun(script);
+ v8::Local<v8::Function> f =
+ v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+ v8::Local<v8::Function> g =
+ v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("g")));
+
+  // Call f then g. The debugger statement in f will cause a break, which
+  // will cause another break.
+ f->Call(env->Global(), 0, NULL);
+ CHECK_EQ(2, message_handler_break_hit_count);
+ // Calling g will not cause any additional breaks.
+ g->Call(env->Global(), 0, NULL);
+ CHECK_EQ(2, message_handler_break_hit_count);
+}
+
+
+// Debug event handler which gets the function on the top frame and schedules a
+// break a number of times.
+static void DebugEventDebugBreak(
+ v8::DebugEvent event,
+ v8::Handle<v8::Object> exec_state,
+ v8::Handle<v8::Object> event_data,
+ v8::Handle<v8::Value> data) {
+
+ if (event == v8::Break) {
+ break_point_hit_count++;
+
+ // Get the name of the top frame function.
+ if (!frame_function_name.IsEmpty()) {
+ // Get the name of the function.
+ const int argc = 1;
+ v8::Handle<v8::Value> argv[argc] = { exec_state };
+ v8::Handle<v8::Value> result = frame_function_name->Call(exec_state,
+ argc, argv);
+ if (result->IsUndefined()) {
+ last_function_hit[0] = '\0';
+ } else {
+ CHECK(result->IsString());
+ v8::Handle<v8::String> function_name(result->ToString());
+ function_name->WriteAscii(last_function_hit);
+ }
+ }
+
+ // Keep forcing breaks.
+ if (break_point_hit_count < 20) {
+ v8::Debug::DebugBreak();
+ }
+ }
+}
+
+
+TEST(RegExpDebugBreak) {
+ v8::HandleScope scope;
+ DebugLocalContext env;
+
+ i::FLAG_regexp_native = true;
+
+ // Create a function for checking the function when hitting a break point.
+ frame_function_name = CompileFunction(&env,
+ frame_function_name_source,
+ "frame_function_name");
+
+  // Test RegExp which matches white spaces and comments at the beginning
+  // of a source line.
+ const char* script =
+ "var sourceLineBeginningSkip = /^(?:[ \\v\\h]*(?:\\/\\*.*?\\*\\/)*)*/;\n"
+ "function f(s) { return s.match(sourceLineBeginningSkip)[0].length; }";
+
+ v8::Local<v8::Function> f = CompileFunction(script, "f");
+ const int argc = 1;
+ v8::Handle<v8::Value> argv[argc] = { v8::String::New(" /* xxx */ a=0;") };
+ v8::Local<v8::Value> result = f->Call(env->Global(), argc, argv);
+ CHECK_EQ(12, result->Int32Value());
+
+ v8::Debug::SetDebugEventListener(DebugEventDebugBreak);
+ v8::Debug::DebugBreak();
+ result = f->Call(env->Global(), argc, argv);
+
+ CHECK_EQ(20, break_point_hit_count);
+ CHECK_EQ("exec", last_function_hit);
+}
+
+
+// Common part of EvalContextData and NestedBreakEventContextData tests.
+static void ExecuteScriptForContextCheck() {
+ // Create a context.
+ v8::Persistent<v8::Context> context_1;
+ v8::Handle<v8::ObjectTemplate> global_template =
+ v8::Handle<v8::ObjectTemplate>();
+ v8::Handle<v8::Value> global_object = v8::Handle<v8::Value>();
+ context_1 = v8::Context::New(NULL, global_template, global_object);
+
+ // Default data value is undefined.
+ CHECK(context_1->GetData()->IsUndefined());
+
+ // Set and check a data value.
+ v8::Handle<v8::Value> data_1 = v8::Number::New(1);
+ context_1->SetData(data_1);
+ CHECK(context_1->GetData()->StrictEquals(data_1));
+
+ // Simple test function with eval that causes a break.
+ const char* source = "function f() { eval('debugger;'); }";
+
+ // Enter and run function in the context.
+ {
+ v8::Context::Scope context_scope(context_1);
+ expected_context = context_1;
+ expected_context_data = data_1;
+ v8::Local<v8::Function> f = CompileFunction(source, "f");
+ f->Call(context_1->Global(), 0, NULL);
+ }
+}
+
+
+// Test which creates a context and sets embedder data on it. Checks that this
+// data is set correctly and that, when the debug message handler is called
+// for a break event in an eval statement, the expected context is the one
+// returned by Message.GetEventContext.
+TEST(EvalContextData) {
+ v8::HandleScope scope;
+ v8::Debug::SetMessageHandler2(ContextCheckMessageHandler);
+
+ ExecuteScriptForContextCheck();
+
+ // One time compile event and one time break event.
+ CHECK_GT(message_handler_hit_count, 2);
+ v8::Debug::SetMessageHandler2(NULL);
+ CheckDebuggerUnloaded();
+}
+
+
+static bool sent_eval = false;
+static int break_count = 0;
+static int continue_command_send_count = 0;
+// Check that the expected context is the one generating the debug event,
+// including the case of nested break events.
+static void DebugEvalContextCheckMessageHandler(
+ const v8::Debug::Message& message) {
+ CHECK(message.GetEventContext() == expected_context);
+ CHECK(message.GetEventContext()->GetData()->StrictEquals(
+ expected_context_data));
+ message_handler_hit_count++;
+
+ if (message.IsEvent() && message.GetEvent() == v8::Break) {
+ break_count++;
+ if (!sent_eval) {
+ sent_eval = true;
+
+ const int kBufferSize = 1000;
+ uint16_t buffer[kBufferSize];
+ const char* eval_command =
+ "{\"seq\":0,"
+ "\"type\":\"request\","
+ "\"command\":\"evaluate\","
+ "arguments:{\"expression\":\"debugger;\","
+ "\"global\":true,\"disable_break\":false}}";
+
+ // Send evaluate command.
+ v8::Debug::SendCommand(buffer, AsciiToUtf16(eval_command, buffer));
+ return;
+ } else {
+ // It's a break event caused by the evaluation request above.
+ SendContinueCommand();
+ continue_command_send_count++;
+ }
+ } else if (message.IsResponse() && continue_command_send_count < 2) {
+      // Response to the evaluation request. We're still paused at the
+      // breakpoint, so send a continue command.
+ SendContinueCommand();
+ continue_command_send_count++;
+ }
+}
+
+
+// Tests that context returned for break event is correct when the event occurs
+// in 'evaluate' debugger request.
+TEST(NestedBreakEventContextData) {
+ v8::HandleScope scope;
+ break_count = 0;
+ message_handler_hit_count = 0;
+ v8::Debug::SetMessageHandler2(DebugEvalContextCheckMessageHandler);
+
+ ExecuteScriptForContextCheck();
+
+ // One time compile event and two times break event.
+ CHECK_GT(message_handler_hit_count, 3);
+
+ // One break from the source and another from the evaluate request.
+ CHECK_EQ(break_count, 2);
+ v8::Debug::SetMessageHandler2(NULL);
+ CheckDebuggerUnloaded();
+}
+
+
+// Debug event listener which counts the script collected events.
+int script_collected_count = 0;
+static void DebugEventScriptCollectedEvent(v8::DebugEvent event,
+ v8::Handle<v8::Object> exec_state,
+ v8::Handle<v8::Object> event_data,
+ v8::Handle<v8::Value> data) {
+  // Count the number of script collected events.
+ if (event == v8::ScriptCollected) {
+ script_collected_count++;
+ }
+}
+
+
+// Test that scripts collected are reported through the debug event listener.
+TEST(ScriptCollectedEvent) {
+ break_point_hit_count = 0;
+ script_collected_count = 0;
+ v8::HandleScope scope;
+ DebugLocalContext env;
+
+ // Request the loaded scripts to initialize the debugger script cache.
+ Debug::GetLoadedScripts();
+
+ // Do garbage collection to ensure that only the script in this test will be
+ // collected afterwards.
+ Heap::CollectAllGarbage();
+
+ script_collected_count = 0;
+ v8::Debug::SetDebugEventListener(DebugEventScriptCollectedEvent,
+ v8::Undefined());
+ {
+ v8::Script::Compile(v8::String::New("eval('a=1')"))->Run();
+ v8::Script::Compile(v8::String::New("eval('a=2')"))->Run();
+ }
+
+ // Do garbage collection to collect the script above which is no longer
+ // referenced.
+ Heap::CollectAllGarbage();
+
+ CHECK_EQ(2, script_collected_count);
+
+ v8::Debug::SetDebugEventListener(NULL);
+ CheckDebuggerUnloaded();
+}
+
+
+// Debug message handler which counts the script collected events.
+int script_collected_message_count = 0;
+static void ScriptCollectedMessageHandler(const v8::Debug::Message& message) {
+ // Count the number of scripts collected.
+ if (message.IsEvent() && message.GetEvent() == v8::ScriptCollected) {
+ script_collected_message_count++;
+ v8::Handle<v8::Context> context = message.GetEventContext();
+ CHECK(context.IsEmpty());
+ }
+}
+
+
+// Test that GetEventContext doesn't fail and returns an empty handle for
+// ScriptCollected events.
+TEST(ScriptCollectedEventContext) {
+ script_collected_message_count = 0;
+ v8::HandleScope scope;
+
+ { // Scope for the DebugLocalContext.
+ DebugLocalContext env;
+
+ // Request the loaded scripts to initialize the debugger script cache.
+ Debug::GetLoadedScripts();
+
+ // Do garbage collection to ensure that only the script in this test will be
+ // collected afterwards.
+ Heap::CollectAllGarbage();
+
+ v8::Debug::SetMessageHandler2(ScriptCollectedMessageHandler);
+ {
+ v8::Script::Compile(v8::String::New("eval('a=1')"))->Run();
+ v8::Script::Compile(v8::String::New("eval('a=2')"))->Run();
+ }
+ }
+
+ // Do garbage collection to collect the script above which is no longer
+ // referenced.
+ Heap::CollectAllGarbage();
+
+ CHECK_EQ(2, script_collected_message_count);
+
+ v8::Debug::SetMessageHandler2(NULL);
+}
+
+
+// Debug message handler which counts the after compile events.
+int after_compile_message_count = 0;
+static void AfterCompileMessageHandler(const v8::Debug::Message& message) {
+  // Count the number of after compile events.
+ if (message.IsEvent()) {
+ if (message.GetEvent() == v8::AfterCompile) {
+ after_compile_message_count++;
+ } else if (message.GetEvent() == v8::Break) {
+ SendContinueCommand();
+ }
+ }
+}
+
+
+// Tests that the after compile event is sent as many times as there are
+// scripts compiled.
+TEST(AfterCompileMessageWhenMessageHandlerIsReset) {
+ v8::HandleScope scope;
+ DebugLocalContext env;
+ after_compile_message_count = 0;
+ const char* script = "var a=1";
+
+ v8::Debug::SetMessageHandler2(AfterCompileMessageHandler);
+ v8::Script::Compile(v8::String::New(script))->Run();
+ v8::Debug::SetMessageHandler2(NULL);
+
+ v8::Debug::SetMessageHandler2(AfterCompileMessageHandler);
+ v8::Debug::DebugBreak();
+ v8::Script::Compile(v8::String::New(script))->Run();
+
+  // Setting the message handler to NULL should cause the debugger to unload.
+ v8::Debug::SetMessageHandler2(NULL);
+ CheckDebuggerUnloaded();
+
+  // The compilation cache should be disabled when the debugger is active.
+ CHECK_EQ(2, after_compile_message_count);
+}
+
+
+// Tests that the break event is sent when the message handler is reset.
+TEST(BreakMessageWhenMessageHandlerIsReset) {
+ v8::HandleScope scope;
+ DebugLocalContext env;
+ after_compile_message_count = 0;
+ const char* script = "function f() {};";
+
+ v8::Debug::SetMessageHandler2(AfterCompileMessageHandler);
+ v8::Script::Compile(v8::String::New(script))->Run();
+ v8::Debug::SetMessageHandler2(NULL);
+
+ v8::Debug::SetMessageHandler2(AfterCompileMessageHandler);
+ v8::Debug::DebugBreak();
+ v8::Local<v8::Function> f =
+ v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+ f->Call(env->Global(), 0, NULL);
+
+  // Setting the message handler to NULL should cause the debugger to unload.
+ v8::Debug::SetMessageHandler2(NULL);
+ CheckDebuggerUnloaded();
+
+  // The compilation cache should be disabled when the debugger is active.
+ CHECK_EQ(1, after_compile_message_count);
+}
+
+
+static int exception_event_count = 0;
+static void ExceptionMessageHandler(const v8::Debug::Message& message) {
+ if (message.IsEvent() && message.GetEvent() == v8::Exception) {
+ exception_event_count++;
+ SendContinueCommand();
+ }
+}
+
+
+// Tests that the exception event is sent when the message handler is reset.
+TEST(ExceptionMessageWhenMessageHandlerIsReset) {
+ v8::HandleScope scope;
+ DebugLocalContext env;
+ exception_event_count = 0;
+ const char* script = "function f() {throw new Error()};";
+
+ v8::Debug::SetMessageHandler2(AfterCompileMessageHandler);
+ v8::Script::Compile(v8::String::New(script))->Run();
+ v8::Debug::SetMessageHandler2(NULL);
+
+ v8::Debug::SetMessageHandler2(ExceptionMessageHandler);
+ v8::Local<v8::Function> f =
+ v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+ f->Call(env->Global(), 0, NULL);
+
+  // Setting the message handler to NULL should cause the debugger to unload.
+ v8::Debug::SetMessageHandler2(NULL);
+ CheckDebuggerUnloaded();
+
+ CHECK_EQ(1, exception_event_count);
}
diff --git a/deps/v8/test/cctest/test-func-name-inference.cc b/deps/v8/test/cctest/test-func-name-inference.cc
index d91f75fa0..1bfc8834a 100644
--- a/deps/v8/test/cctest/test-func-name-inference.cc
+++ b/deps/v8/test/cctest/test-func-name-inference.cc
@@ -32,15 +32,18 @@
#include "cctest.h"
+using ::v8::internal::CStrVector;
+using ::v8::internal::Factory;
using ::v8::internal::Handle;
+using ::v8::internal::Heap;
using ::v8::internal::JSFunction;
using ::v8::internal::Object;
+using ::v8::internal::Runtime;
using ::v8::internal::Script;
+using ::v8::internal::SmartPointer;
using ::v8::internal::SharedFunctionInfo;
using ::v8::internal::String;
-namespace i = ::v8::internal;
-
static v8::Persistent<v8::Context> env;
@@ -66,19 +69,19 @@ static void CheckFunctionName(v8::Handle<v8::Script> script,
// Find the position of a given func source substring in the source.
Handle<String> func_pos_str =
- i::Factory::NewStringFromAscii(i::CStrVector(func_pos_src));
- int func_pos = i::Runtime::StringMatch(script_src, func_pos_str, 0);
+ Factory::NewStringFromAscii(CStrVector(func_pos_src));
+ int func_pos = Runtime::StringMatch(script_src, func_pos_str, 0);
CHECK_NE(0, func_pos);
// Obtain SharedFunctionInfo for the function.
Object* shared_func_info_ptr =
- i::Runtime::FindSharedFunctionInfoInScript(i_script, func_pos);
- CHECK(shared_func_info_ptr != i::Heap::undefined_value());
+ Runtime::FindSharedFunctionInfoInScript(i_script, func_pos);
+ CHECK(shared_func_info_ptr != Heap::undefined_value());
Handle<SharedFunctionInfo> shared_func_info(
SharedFunctionInfo::cast(shared_func_info_ptr));
// Verify inferred function name.
- i::SmartPointer<char> inferred_name =
+ SmartPointer<char> inferred_name =
shared_func_info->inferred_name()->ToCString();
CHECK_EQ(ref_inferred_name, *inferred_name);
}
diff --git a/deps/v8/test/cctest/test-hashmap.cc b/deps/v8/test/cctest/test-hashmap.cc
index 954dbe103..70213c9aa 100644
--- a/deps/v8/test/cctest/test-hashmap.cc
+++ b/deps/v8/test/cctest/test-hashmap.cc
@@ -38,20 +38,29 @@ static bool DefaultMatchFun(void* a, void* b) {
}
+typedef uint32_t (*IntKeyHash)(uint32_t key);
+
+
class IntSet {
public:
- IntSet() : map_(DefaultMatchFun) {}
+ explicit IntSet(IntKeyHash hash) : hash_(hash), map_(DefaultMatchFun) {}
void Insert(int x) {
CHECK_NE(0, x); // 0 corresponds to (void*)NULL - illegal key value
- HashMap::Entry* p = map_.Lookup(reinterpret_cast<void*>(x), Hash(x), true);
+ HashMap::Entry* p = map_.Lookup(reinterpret_cast<void*>(x), hash_(x), true);
CHECK(p != NULL); // insert is set!
CHECK_EQ(reinterpret_cast<void*>(x), p->key);
// we don't care about p->value
}
+ void Remove(int x) {
+ CHECK_NE(0, x); // 0 corresponds to (void*)NULL - illegal key value
+ map_.Remove(reinterpret_cast<void*>(x), hash_(x));
+ }
+
bool Present(int x) {
- HashMap::Entry* p = map_.Lookup(reinterpret_cast<void*>(x), Hash(x), false);
+ HashMap::Entry* p =
+ map_.Lookup(reinterpret_cast<void*>(x), hash_(x), false);
if (p != NULL) {
CHECK_EQ(reinterpret_cast<void*>(x), p->key);
}
@@ -72,13 +81,17 @@ class IntSet {
}
private:
+ IntKeyHash hash_;
HashMap map_;
- static uint32_t Hash(uint32_t key) { return key * 23; }
};
-TEST(Set) {
- IntSet set;
+static uint32_t Hash(uint32_t key) { return 23; }
+static uint32_t CollisionHash(uint32_t key) { return key & 0x3; }
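+// Both hash functions are intentionally degenerate: Hash maps every key to a
+// single bucket and CollisionHash to only four, so insertion and removal are
+// exercised against long collision chains.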
+
+
+void TestSet(IntKeyHash hash, int size) {
+ IntSet set(hash);
CHECK_EQ(0, set.occupancy());
set.Insert(1);
@@ -96,6 +109,18 @@ TEST(Set) {
CHECK(!set.Present(4));
CHECK_EQ(3, set.occupancy());
+ set.Remove(1);
+ CHECK(!set.Present(1));
+ CHECK(set.Present(2));
+ CHECK(set.Present(3));
+ CHECK_EQ(2, set.occupancy());
+
+ set.Remove(3);
+ CHECK(!set.Present(1));
+ CHECK(set.Present(2));
+ CHECK(!set.Present(3));
+ CHECK_EQ(1, set.occupancy());
+
set.Clear();
CHECK_EQ(0, set.occupancy());
@@ -103,21 +128,49 @@ TEST(Set) {
const int start = 453;
const int factor = 13;
const int offset = 7;
- const uint32_t n = 1000;
+ const uint32_t n = size;
int x = start;
for (uint32_t i = 0; i < n; i++) {
CHECK_EQ(i, static_cast<double>(set.occupancy()));
set.Insert(x);
- x = x*factor + offset;
+ x = x * factor + offset;
}
+ CHECK_EQ(n, static_cast<double>(set.occupancy()));
// Verify the same sequence of values.
x = start;
for (uint32_t i = 0; i < n; i++) {
CHECK(set.Present(x));
- x = x*factor + offset;
+ x = x * factor + offset;
}
-
CHECK_EQ(n, static_cast<double>(set.occupancy()));
+
+ // Remove all these values.
+ x = start;
+ for (uint32_t i = 0; i < n; i++) {
+ CHECK_EQ(n - i, static_cast<double>(set.occupancy()));
+ CHECK(set.Present(x));
+ set.Remove(x);
+ CHECK(!set.Present(x));
+ x = x * factor + offset;
+
+    // Verify that the expected values are still there.
+ int y = start;
+ for (uint32_t j = 0; j < n; j++) {
+ if (j <= i) {
+ CHECK(!set.Present(y));
+ } else {
+ CHECK(set.Present(y));
+ }
+ y = y * factor + offset;
+ }
+ }
+ CHECK_EQ(0, set.occupancy());
+}
+
+
+TEST(Set) {
+ TestSet(Hash, 100);
+ TestSet(CollisionHash, 50);
}
diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc
index e35ac5fba..515657f71 100644
--- a/deps/v8/test/cctest/test-heap.cc
+++ b/deps/v8/test/cctest/test-heap.cc
@@ -36,34 +36,43 @@ TEST(HeapMaps) {
InitializeVM();
CheckMap(Heap::meta_map(), MAP_TYPE, Map::kSize);
CheckMap(Heap::heap_number_map(), HEAP_NUMBER_TYPE, HeapNumber::kSize);
- CheckMap(Heap::fixed_array_map(), FIXED_ARRAY_TYPE, Array::kHeaderSize);
+ CheckMap(Heap::fixed_array_map(), FIXED_ARRAY_TYPE, Array::kAlignedSize);
CheckMap(Heap::long_string_map(), LONG_STRING_TYPE,
- SeqTwoByteString::kHeaderSize);
+ SeqTwoByteString::kAlignedSize);
}
static void CheckOddball(Object* obj, const char* string) {
CHECK(obj->IsOddball());
+#ifndef V8_HOST_ARCH_64_BIT
+// TODO(X64): Reenable when native builtins work.
bool exc;
Object* print_string = *Execution::ToString(Handle<Object>(obj), &exc);
CHECK(String::cast(print_string)->IsEqualTo(CStrVector(string)));
+#endif // V8_HOST_ARCH_64_BIT
}
static void CheckSmi(int value, const char* string) {
+#ifndef V8_HOST_ARCH_64_BIT
+// TODO(X64): Reenable when native builtins work.
bool exc;
Object* print_string =
*Execution::ToString(Handle<Object>(Smi::FromInt(value)), &exc);
CHECK(String::cast(print_string)->IsEqualTo(CStrVector(string)));
+#endif // V8_HOST_ARCH_64_BIT
}
static void CheckNumber(double value, const char* string) {
Object* obj = Heap::NumberFromDouble(value);
CHECK(obj->IsNumber());
+#ifndef V8_HOST_ARCH_64_BIT
+// TODO(X64): Reenable when native builtins work.
bool exc;
Object* print_string = *Execution::ToString(Handle<Object>(obj), &exc);
CHECK(String::cast(print_string)->IsEqualTo(CStrVector(string)));
+#endif // V8_HOST_ARCH_64_BIT
}
@@ -178,12 +187,16 @@ TEST(HeapObjects) {
TEST(Tagging) {
InitializeVM();
+ int request = 24;
+ ASSERT_EQ(request, OBJECT_SIZE_ALIGN(request));
CHECK(Smi::FromInt(42)->IsSmi());
- CHECK(Failure::RetryAfterGC(12, NEW_SPACE)->IsFailure());
- CHECK_EQ(12, Failure::RetryAfterGC(12, NEW_SPACE)->requested());
- CHECK_EQ(NEW_SPACE, Failure::RetryAfterGC(12, NEW_SPACE)->allocation_space());
+ CHECK(Failure::RetryAfterGC(request, NEW_SPACE)->IsFailure());
+ CHECK_EQ(request, Failure::RetryAfterGC(request, NEW_SPACE)->requested());
+ CHECK_EQ(NEW_SPACE,
+ Failure::RetryAfterGC(request, NEW_SPACE)->allocation_space());
CHECK_EQ(OLD_POINTER_SPACE,
- Failure::RetryAfterGC(12, OLD_POINTER_SPACE)->allocation_space());
+ Failure::RetryAfterGC(request,
+ OLD_POINTER_SPACE)->allocation_space());
CHECK(Failure::Exception()->IsFailure());
CHECK(Smi::FromInt(Smi::kMinValue)->IsSmi());
CHECK(Smi::FromInt(Smi::kMaxValue)->IsSmi());
@@ -315,7 +328,7 @@ static bool WeakPointerCleared = false;
static void TestWeakGlobalHandleCallback(v8::Persistent<v8::Value> handle,
void* id) {
USE(handle);
- if (1234 == reinterpret_cast<int>(id)) WeakPointerCleared = true;
+ if (1234 == reinterpret_cast<intptr_t>(id)) WeakPointerCleared = true;
}
@@ -385,7 +398,7 @@ TEST(WeakGlobalHandlesMark) {
static void TestDeleteWeakGlobalHandleCallback(
v8::Persistent<v8::Value> handle,
void* id) {
- if (1234 == reinterpret_cast<int>(id)) WeakPointerCleared = true;
+ if (1234 == reinterpret_cast<intptr_t>(id)) WeakPointerCleared = true;
handle.Dispose();
}
@@ -540,7 +553,7 @@ TEST(ObjectProperties) {
CHECK(obj->HasLocalProperty(first));
// delete first
- CHECK(obj->DeleteProperty(first));
+ CHECK(obj->DeleteProperty(first, JSObject::NORMAL_DELETION));
CHECK(!obj->HasLocalProperty(first));
// add first and then second
@@ -550,9 +563,9 @@ TEST(ObjectProperties) {
CHECK(obj->HasLocalProperty(second));
// delete first and then second
- CHECK(obj->DeleteProperty(first));
+ CHECK(obj->DeleteProperty(first, JSObject::NORMAL_DELETION));
CHECK(obj->HasLocalProperty(second));
- CHECK(obj->DeleteProperty(second));
+ CHECK(obj->DeleteProperty(second, JSObject::NORMAL_DELETION));
CHECK(!obj->HasLocalProperty(first));
CHECK(!obj->HasLocalProperty(second));
@@ -563,9 +576,9 @@ TEST(ObjectProperties) {
CHECK(obj->HasLocalProperty(second));
// delete second and then first
- CHECK(obj->DeleteProperty(second));
+ CHECK(obj->DeleteProperty(second, JSObject::NORMAL_DELETION));
CHECK(obj->HasLocalProperty(first));
- CHECK(obj->DeleteProperty(first));
+ CHECK(obj->DeleteProperty(first, JSObject::NORMAL_DELETION));
CHECK(!obj->HasLocalProperty(first));
CHECK(!obj->HasLocalProperty(second));
diff --git a/deps/v8/test/cctest/test-list.cc b/deps/v8/test/cctest/test-list.cc
index 838a45d0d..624b6e939 100644
--- a/deps/v8/test/cctest/test-list.cc
+++ b/deps/v8/test/cctest/test-list.cc
@@ -65,3 +65,37 @@ TEST(ListAdd) {
list.Add(list[0]);
CHECK_EQ(1, list[4]);
}
+
+// Test that we can add all elements from a list to another list.
+TEST(ListAddAll) {
+ List<int, ZeroingAllocationPolicy> list(4);
+ list.Add(0);
+ list.Add(1);
+ list.Add(2);
+
+ CHECK_EQ(3, list.length());
+ for (int i = 0; i < 3; i++) {
+ CHECK_EQ(i, list[i]);
+ }
+
+ List<int, ZeroingAllocationPolicy> other_list(4);
+
+ // Add no elements to list since other_list is empty.
+ list.AddAll(other_list);
+ CHECK_EQ(3, list.length());
+ for (int i = 0; i < 3; i++) {
+ CHECK_EQ(i, list[i]);
+ }
+
+ // Add three elements to other_list.
+ other_list.Add(0);
+ other_list.Add(1);
+ other_list.Add(2);
+
+ // Copy the three elements from other_list to list.
+ list.AddAll(other_list);
+ CHECK_EQ(6, list.length());
+ for (int i = 0; i < 6; i++) {
+ CHECK_EQ(i % 3, list[i]);
+ }
+}
diff --git a/deps/v8/test/cctest/test-log-ia32.cc b/deps/v8/test/cctest/test-log-ia32.cc
index 43cb294b1..a40a800ee 100644
--- a/deps/v8/test/cctest/test-log-ia32.cc
+++ b/deps/v8/test/cctest/test-log-ia32.cc
@@ -13,6 +13,7 @@
#include "top.h"
#include "cctest.h"
#include "disassembler.h"
+#include "register-allocator-inl.h"
using v8::Function;
using v8::Local;
@@ -36,13 +37,11 @@ static v8::Persistent<v8::Context> env;
static struct {
- StackTracer* tracer;
TickSample* sample;
-} trace_env = { NULL, NULL };
+} trace_env = { NULL };
-static void InitTraceEnv(StackTracer* tracer, TickSample* sample) {
- trace_env.tracer = tracer;
+static void InitTraceEnv(TickSample* sample) {
trace_env.sample = sample;
}
@@ -52,7 +51,7 @@ static void DoTrace(Address fp) {
// sp is only used to define stack high bound
trace_env.sample->sp =
reinterpret_cast<unsigned int>(trace_env.sample) - 10240;
- trace_env.tracer->Trace(trace_env.sample);
+ StackTracer::Trace(trace_env.sample);
}
@@ -98,6 +97,8 @@ class TraceExtension : public v8::Extension {
v8::Handle<String> name);
static v8::Handle<v8::Value> Trace(const v8::Arguments& args);
static v8::Handle<v8::Value> JSTrace(const v8::Arguments& args);
+ static v8::Handle<v8::Value> JSEntrySP(const v8::Arguments& args);
+ static v8::Handle<v8::Value> JSEntrySPLevel2(const v8::Arguments& args);
private:
static Address GetFP(const v8::Arguments& args);
static const char* kSource;
@@ -106,8 +107,9 @@ class TraceExtension : public v8::Extension {
const char* TraceExtension::kSource =
"native function trace();"
- "native function js_trace();";
-
+ "native function js_trace();"
+ "native function js_entry_sp();"
+ "native function js_entry_sp_level2();";
v8::Handle<v8::FunctionTemplate> TraceExtension::GetNativeFunction(
v8::Handle<String> name) {
@@ -115,6 +117,10 @@ v8::Handle<v8::FunctionTemplate> TraceExtension::GetNativeFunction(
return v8::FunctionTemplate::New(TraceExtension::Trace);
} else if (name->Equals(String::New("js_trace"))) {
return v8::FunctionTemplate::New(TraceExtension::JSTrace);
+ } else if (name->Equals(String::New("js_entry_sp"))) {
+ return v8::FunctionTemplate::New(TraceExtension::JSEntrySP);
+ } else if (name->Equals(String::New("js_entry_sp_level2"))) {
+ return v8::FunctionTemplate::New(TraceExtension::JSEntrySPLevel2);
} else {
CHECK(false);
return v8::Handle<v8::FunctionTemplate>();
@@ -142,6 +148,34 @@ v8::Handle<v8::Value> TraceExtension::JSTrace(const v8::Arguments& args) {
}
+static Address GetJsEntrySp() {
+ CHECK_NE(NULL, Top::GetCurrentThread());
+ return Top::js_entry_sp(Top::GetCurrentThread());
+}
+
+
+v8::Handle<v8::Value> TraceExtension::JSEntrySP(const v8::Arguments& args) {
+ CHECK_NE(0, GetJsEntrySp());
+ return v8::Undefined();
+}
+
+
+static void CompileRun(const char* source) {
+ Script::Compile(String::New(source))->Run();
+}
+
+
+v8::Handle<v8::Value> TraceExtension::JSEntrySPLevel2(
+ const v8::Arguments& args) {
+ v8::HandleScope scope;
+ const Address js_entry_sp = GetJsEntrySp();
+ CHECK_NE(0, js_entry_sp);
+ CompileRun("js_entry_sp();");
+ CHECK_EQ(js_entry_sp, GetJsEntrySp());
+ return v8::Undefined();
+}
+
+
static TraceExtension kTraceExtension;
v8::DeclareExtension kTraceExtensionDeclaration(&kTraceExtension);
@@ -163,11 +197,6 @@ static Handle<JSFunction> CompileFunction(const char* source) {
}
-static void CompileRun(const char* source) {
- Script::Compile(String::New(source))->Run();
-}
-
-
static Local<Value> GetGlobalProperty(const char* name) {
return env->Global()->Get(String::New(name));
}
@@ -198,7 +227,8 @@ static Handle<v8::internal::String> NewString(const char* s) {
}
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
class CodeGeneratorPatcher {
public:
@@ -253,8 +283,7 @@ static void CreateTraceCallerFunction(const char* func_name,
TEST(CFromJSStackTrace) {
TickSample sample;
- StackTracer tracer(reinterpret_cast<uintptr_t>(&sample));
- InitTraceEnv(&tracer, &sample);
+ InitTraceEnv(&sample);
InitializeVM();
v8::HandleScope scope;
@@ -275,8 +304,7 @@ TEST(CFromJSStackTrace) {
TEST(PureJSStackTrace) {
TickSample sample;
- StackTracer tracer(reinterpret_cast<uintptr_t>(&sample));
- InitTraceEnv(&tracer, &sample);
+ InitTraceEnv(&sample);
InitializeVM();
v8::HandleScope scope;
@@ -321,11 +349,22 @@ static int CFunc(int depth) {
TEST(PureCStackTrace) {
TickSample sample;
- StackTracer tracer(reinterpret_cast<uintptr_t>(&sample));
- InitTraceEnv(&tracer, &sample);
+ InitTraceEnv(&sample);
// Check that sampler doesn't crash
CHECK_EQ(10, CFunc(10));
}
+TEST(JsEntrySp) {
+ InitializeVM();
+ v8::HandleScope scope;
+ CHECK_EQ(0, GetJsEntrySp());
+ CompileRun("a = 1; b = a + 1;");
+ CHECK_EQ(0, GetJsEntrySp());
+ CompileRun("js_entry_sp();");
+ CHECK_EQ(0, GetJsEntrySp());
+ CompileRun("js_entry_sp_level2();");
+ CHECK_EQ(0, GetJsEntrySp());
+}
+
#endif // ENABLE_LOGGING_AND_PROFILING
diff --git a/deps/v8/test/cctest/test-log-utils.cc b/deps/v8/test/cctest/test-log-utils.cc
new file mode 100644
index 000000000..64e590099
--- /dev/null
+++ b/deps/v8/test/cctest/test-log-utils.cc
@@ -0,0 +1,132 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+//
+// Tests of logging utilities from log-utils.h
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+#include "v8.h"
+
+#include "log-utils.h"
+#include "cctest.h"
+
+using v8::internal::EmbeddedVector;
+using v8::internal::LogDynamicBuffer;
+using v8::internal::Vector;
+
+// Fills 'ref_buffer' with test data: a sequence of two-digit
+// hex numbers: '0001020304...'. Then writes 'ref_buffer' contents to 'dynabuf'.
+static void WriteData(LogDynamicBuffer* dynabuf, Vector<char>* ref_buffer) {
+ static const char kHex[] = "0123456789ABCDEF";
+ CHECK_GT(ref_buffer->length(), 0);
+ CHECK_GT(513, ref_buffer->length());
+ for (int i = 0, half_len = ref_buffer->length() >> 1; i < half_len; ++i) {
+ (*ref_buffer)[i << 1] = kHex[i >> 4];
+ (*ref_buffer)[(i << 1) + 1] = kHex[i & 15];
+ }
+ if (ref_buffer->length() & 1) {
+ ref_buffer->last() = kHex[ref_buffer->length() >> 5];
+ }
+ CHECK_EQ(ref_buffer->length(),
+ dynabuf->Write(ref_buffer->start(), ref_buffer->length()));
+}
+
+
+static int ReadData(
+ LogDynamicBuffer* dynabuf, int start_pos, i::Vector<char>* buffer) {
+ return dynabuf->Read(start_pos, buffer->start(), buffer->length());
+}
+
+
+// Helper function used by CHECK_EQ to compare Vectors. Templatized to
+// accept both "char" and "const char" vector contents.
+template <typename E, typename V>
+static inline void CheckEqualsHelper(const char* file, int line,
+ const char* expected_source,
+ const Vector<E>& expected,
+ const char* value_source,
+ const Vector<V>& value) {
+ if (expected.length() != value.length()) {
+ V8_Fatal(file, line, "CHECK_EQ(%s, %s) failed\n"
+ "# Vectors lengths differ: %d expected, %d found",
+ expected_source, value_source,
+ expected.length(), value.length());
+ }
+ if (strncmp(expected.start(), value.start(), expected.length()) != 0) {
+ V8_Fatal(file, line, "CHECK_EQ(%s, %s) failed\n"
+ "# Vectors contents differ:\n"
+ "# Expected: %.*s\n"
+ "# Found: %.*s",
+ expected_source, value_source,
+ expected.length(), expected.start(),
+ value.length(), value.start());
+ }
+}
+
+
+TEST(DynaBufSingleBlock) {
+ LogDynamicBuffer dynabuf(32, 32, "", 0);
+ EmbeddedVector<char, 32> ref_buf;
+ WriteData(&dynabuf, &ref_buf);
+ EmbeddedVector<char, 32> buf;
+ CHECK_EQ(32, dynabuf.Read(0, buf.start(), buf.length()));
+ CHECK_EQ(32, ReadData(&dynabuf, 0, &buf));
+ CHECK_EQ(ref_buf, buf);
+
+ // Verify that we can't read and write past the end.
+ CHECK_EQ(0, dynabuf.Read(32, buf.start(), buf.length()));
+ CHECK_EQ(0, dynabuf.Write(buf.start(), buf.length()));
+}
+
+
+TEST(DynaBufCrossBlocks) {
+ LogDynamicBuffer dynabuf(32, 128, "", 0);
+ EmbeddedVector<char, 48> ref_buf;
+ WriteData(&dynabuf, &ref_buf);
+ CHECK_EQ(48, dynabuf.Write(ref_buf.start(), ref_buf.length()));
+  // Verify that we can't write data when the remaining space is insufficient.
+ CHECK_EQ(0, dynabuf.Write(ref_buf.start(), ref_buf.length()));
+ EmbeddedVector<char, 48> buf;
+ CHECK_EQ(48, ReadData(&dynabuf, 0, &buf));
+ CHECK_EQ(ref_buf, buf);
+ CHECK_EQ(48, ReadData(&dynabuf, 48, &buf));
+ CHECK_EQ(ref_buf, buf);
+ CHECK_EQ(0, ReadData(&dynabuf, 48 * 2, &buf));
+}
+
+
+TEST(DynaBufReadTruncation) {
+ LogDynamicBuffer dynabuf(32, 128, "", 0);
+ EmbeddedVector<char, 128> ref_buf;
+ WriteData(&dynabuf, &ref_buf);
+ EmbeddedVector<char, 128> buf;
+ CHECK_EQ(128, ReadData(&dynabuf, 0, &buf));
+ CHECK_EQ(ref_buf, buf);
+ // Try to read near the end with a buffer larger than remaining data size.
+ EmbeddedVector<char, 48> tail_buf;
+ CHECK_EQ(32, ReadData(&dynabuf, 128 - 32, &tail_buf));
+ CHECK_EQ(ref_buf.SubVector(128 - 32, 128), tail_buf.SubVector(0, 32));
+}
+
+
+TEST(DynaBufSealing) {
+ const char* seal = "Sealed";
+ const int seal_size = strlen(seal);
+ LogDynamicBuffer dynabuf(32, 128, seal, seal_size);
+ EmbeddedVector<char, 100> ref_buf;
+ WriteData(&dynabuf, &ref_buf);
+ // Try to write data that will not fit in the buffer.
+ CHECK_EQ(0, dynabuf.Write(ref_buf.start(), 128 - 100 - seal_size + 1));
+  // Now the buffer is sealed; writing any amount of data is forbidden.
+ CHECK_EQ(0, dynabuf.Write(ref_buf.start(), 1));
+ EmbeddedVector<char, 100> buf;
+ CHECK_EQ(100, ReadData(&dynabuf, 0, &buf));
+ CHECK_EQ(ref_buf, buf);
+ // Check the seal.
+ EmbeddedVector<char, 50> seal_buf;
+ CHECK_EQ(seal_size, ReadData(&dynabuf, 100, &seal_buf));
+ CHECK_EQ(v8::internal::CStrVector(seal), seal_buf.SubVector(0, seal_size));
+ // Verify that there's no data beyond the seal.
+ CHECK_EQ(0, ReadData(&dynabuf, 100 + seal_size, &buf));
+}
+
+#endif // ENABLE_LOGGING_AND_PROFILING
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index 6a7e54f1d..f3f7efc71 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -7,15 +7,18 @@
#include "v8.h"
#include "log.h"
-
#include "cctest.h"
+using v8::internal::Address;
+using v8::internal::EmbeddedVector;
using v8::internal::Logger;
+namespace i = v8::internal;
+
static void SetUp() {
// Log to memory buffer.
- v8::internal::FLAG_logfile = "*";
- v8::internal::FLAG_log = true;
+ i::FLAG_logfile = "*";
+ i::FLAG_log = true;
Logger::Setup();
}
@@ -54,6 +57,7 @@ TEST(GetMessages) {
memset(log_lines, 0, sizeof(log_lines));
// A bit more than the first line length.
CHECK_EQ(line_1_len, Logger::GetLogLines(0, log_lines, line_1_len + 3));
+ log_lines[line_1_len] = '\0';
CHECK_EQ(line_1, log_lines);
memset(log_lines, 0, sizeof(log_lines));
const char* line_2 = "cccc,\"dddd\"\n";
@@ -82,6 +86,11 @@ TEST(GetMessages) {
}
+static int GetLogLines(int start_pos, i::Vector<char>* buffer) {
+ return Logger::GetLogLines(start_pos, buffer->start(), buffer->length());
+}
+
+
TEST(BeyondWritePosition) {
SetUp();
Logger::StringEvent("aaa", "bbb");
@@ -89,22 +98,24 @@ TEST(BeyondWritePosition) {
// See Logger::StringEvent.
const char* all_lines = "aaa,\"bbb\"\ncccc,\"dddd\"\n";
const int all_lines_len = strlen(all_lines);
- CHECK_EQ(0, Logger::GetLogLines(all_lines_len, NULL, 1));
- CHECK_EQ(0, Logger::GetLogLines(all_lines_len, NULL, 100));
- CHECK_EQ(0, Logger::GetLogLines(all_lines_len + 1, NULL, 1));
- CHECK_EQ(0, Logger::GetLogLines(all_lines_len + 1, NULL, 100));
- CHECK_EQ(0, Logger::GetLogLines(all_lines_len + 100, NULL, 1));
- CHECK_EQ(0, Logger::GetLogLines(all_lines_len + 100, NULL, 100));
- CHECK_EQ(0, Logger::GetLogLines(10 * 1024 * 1024, NULL, 1));
- CHECK_EQ(0, Logger::GetLogLines(10 * 1024 * 1024, NULL, 100));
+ EmbeddedVector<char, 100> buffer;
+ const int beyond_write_pos = all_lines_len;
+ CHECK_EQ(0, Logger::GetLogLines(beyond_write_pos, buffer.start(), 1));
+ CHECK_EQ(0, GetLogLines(beyond_write_pos, &buffer));
+ CHECK_EQ(0, Logger::GetLogLines(beyond_write_pos + 1, buffer.start(), 1));
+ CHECK_EQ(0, GetLogLines(beyond_write_pos + 1, &buffer));
+ CHECK_EQ(0, Logger::GetLogLines(beyond_write_pos + 100, buffer.start(), 1));
+ CHECK_EQ(0, GetLogLines(beyond_write_pos + 100, &buffer));
+ CHECK_EQ(0, Logger::GetLogLines(10 * 1024 * 1024, buffer.start(), 1));
+ CHECK_EQ(0, GetLogLines(10 * 1024 * 1024, &buffer));
TearDown();
}
TEST(MemoryLoggingTurnedOff) {
// Log to stdout
- v8::internal::FLAG_logfile = "-";
- v8::internal::FLAG_log = true;
+ i::FLAG_logfile = "-";
+ i::FLAG_log = true;
Logger::Setup();
CHECK_EQ(0, Logger::GetLogLines(0, NULL, 0));
CHECK_EQ(0, Logger::GetLogLines(100, NULL, 0));
@@ -114,4 +125,588 @@ TEST(MemoryLoggingTurnedOff) {
}
+static void CompileAndRunScript(const char *src) {
+ v8::Script::Compile(v8::String::New(src))->Run();
+}
+
+
+namespace v8 {
+namespace internal {
+
+class LoggerTestHelper : public AllStatic {
+ public:
+ static bool IsSamplerActive() { return Logger::IsProfilerSamplerActive(); }
+};
+
+} // namespace v8::internal
+} // namespace v8
+
+using v8::internal::LoggerTestHelper;
+
+
+static int CheckThatProfilerWorks(int log_pos) {
+ Logger::ResumeProfiler();
+ CHECK(LoggerTestHelper::IsSamplerActive());
+
+ // Verify that the current map of compiled functions has been logged.
+ EmbeddedVector<char, 102400> buffer;
+ int map_log_size = GetLogLines(log_pos, &buffer);
+ printf("map_log_size: %d\n", map_log_size);
+ CHECK_GT(map_log_size, 0);
+ CHECK_GT(buffer.length(), map_log_size);
+ log_pos += map_log_size;
+ // Check buffer contents.
+ buffer[map_log_size] = '\0';
+ const char* code_creation = "\ncode-creation,"; // eq. to /^code-creation,/
+ CHECK_NE(NULL, strstr(buffer.start(), code_creation));
+
+  // Force the compiler to generate new code by parameterizing the source.
+ EmbeddedVector<char, 100> script_src;
+ i::OS::SNPrintF(script_src,
+ "for (var i = 0; i < 1000; ++i) { "
+ "(function(x) { return %d * x; })(i); }",
+ log_pos);
+ // Run code for 200 msecs to get some ticks.
+ const double end_time = i::OS::TimeCurrentMillis() + 200;
+ while (i::OS::TimeCurrentMillis() < end_time) {
+ CompileAndRunScript(script_src.start());
+ }
+
+ Logger::PauseProfiler();
+ CHECK(!LoggerTestHelper::IsSamplerActive());
+
+  // Wait 50 msecs to allow the Profiler thread to process the last
+  // tick sample it has received.
+ i::OS::Sleep(50);
+
+ // Now we must have compiler and tick records.
+ int log_size = GetLogLines(log_pos, &buffer);
+ printf("log_size: %d\n", log_size);
+ CHECK_GT(log_size, 0);
+ CHECK_GT(buffer.length(), log_size);
+ log_pos += log_size;
+ // Check buffer contents.
+ buffer[log_size] = '\0';
+ const char* tick = "\ntick,";
+ CHECK_NE(NULL, strstr(buffer.start(), code_creation));
+ CHECK_NE(NULL, strstr(buffer.start(), tick));
+
+ return log_pos;
+}
+
+
+TEST(ProfLazyMode) {
+ const bool saved_prof_lazy = i::FLAG_prof_lazy;
+ const bool saved_prof = i::FLAG_prof;
+ const bool saved_prof_auto = i::FLAG_prof_auto;
+ i::FLAG_prof = true;
+ i::FLAG_prof_lazy = true;
+ i::FLAG_prof_auto = false;
+ i::FLAG_logfile = "*";
+
+  // If tests are being run manually, V8 will already be initialized
+  // by the test below.
+ const bool need_to_set_up_logger = i::V8::IsRunning();
+ v8::HandleScope scope;
+ v8::Handle<v8::Context> env = v8::Context::New();
+ if (need_to_set_up_logger) Logger::Setup();
+ env->Enter();
+
+ // No sampling should happen prior to resuming profiler.
+ CHECK(!LoggerTestHelper::IsSamplerActive());
+
+ // Read initial logged data (static libs map).
+ EmbeddedVector<char, 102400> buffer;
+ int log_pos = GetLogLines(0, &buffer);
+ CHECK_GT(log_pos, 0);
+ CHECK_GT(buffer.length(), log_pos);
+
+ CompileAndRunScript("var a = (function(x) { return x + 1; })(10);");
+
+ // Nothing must be logged while profiling is suspended.
+ CHECK_EQ(0, GetLogLines(log_pos, &buffer));
+
+ log_pos = CheckThatProfilerWorks(log_pos);
+
+ CompileAndRunScript("var a = (function(x) { return x + 1; })(10);");
+
+ // No new data beyond last retrieved position.
+ CHECK_EQ(0, GetLogLines(log_pos, &buffer));
+
+ // Check that profiling can be resumed again.
+ CheckThatProfilerWorks(log_pos);
+
+ env->Exit();
+ Logger::TearDown();
+ i::FLAG_prof_lazy = saved_prof_lazy;
+ i::FLAG_prof = saved_prof;
+ i::FLAG_prof_auto = saved_prof_auto;
+}
+
+
+static inline bool IsStringEqualTo(const char* r, const char* s) {
+ return strncmp(r, s, strlen(r)) == 0;
+}
+
+
+static bool Consume(const char* str, char** buf) {
+ if (IsStringEqualTo(str, *buf)) {
+ *buf += strlen(str);
+ return true;
+ }
+ return false;
+}
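+
+// For example (hypothetical input): with *buf pointing at
+// "code-creation,0x5a", Consume("code-creation,", &buf) returns true and
+// advances *buf past the matched prefix; on a mismatch *buf is unchanged.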
+
+
+namespace {
+
+// A code entity is a pointer into the log buffer at the position of a
+// code-creation event, offset to the point where the entity size begins,
+// e.g.: '255,"func"\n'. This makes comparing code entities pretty easy.
+typedef char* CodeEntityInfo;
+
+class Interval {
+ public:
+ Interval()
+ : min_addr_(reinterpret_cast<Address>(-1)),
+ max_addr_(reinterpret_cast<Address>(0)), next_(NULL) {}
+
+ ~Interval() { delete next_; }
+
+ size_t Length() {
+ size_t result = max_addr_ - min_addr_ + 1;
+ if (next_ != NULL) result += next_->Length();
+ return result;
+ }
+
+ void CloneFrom(Interval* src) {
+ while (src != NULL) {
+ RegisterAddress(src->min_addr_);
+ RegisterAddress(src->max_addr_);
+ src = src->next_;
+ }
+ }
+
+ bool Contains(Address addr) {
+ if (min_addr_ <= addr && addr <= max_addr_) {
+ return true;
+ }
+ if (next_ != NULL) {
+ return next_->Contains(addr);
+ } else {
+ return false;
+ }
+ }
+
+ size_t GetIndex(Address addr) {
+ if (min_addr_ <= addr && addr <= max_addr_) {
+ return addr - min_addr_;
+ }
+ CHECK_NE(NULL, next_);
+ return (max_addr_ - min_addr_ + 1) + next_->GetIndex(addr);
+ }
+
+ Address GetMinAddr() {
+ return next_ == NULL ? min_addr_ : i::Min(min_addr_, next_->GetMinAddr());
+ }
+
+ Address GetMaxAddr() {
+ return next_ == NULL ? max_addr_ : i::Max(max_addr_, next_->GetMaxAddr());
+ }
+
+ void RegisterAddress(Address addr) {
+ if (min_addr_ == reinterpret_cast<Address>(-1)
+        || static_cast<size_t>(addr > min_addr_ ?
+           addr - min_addr_ : min_addr_ - addr) < MAX_DELTA) {
+ if (addr < min_addr_) min_addr_ = addr;
+ if (addr > max_addr_) max_addr_ = addr;
+ } else {
+ if (next_ == NULL) next_ = new Interval();
+ next_->RegisterAddress(addr);
+ }
+ }
+
+ Address raw_min_addr() { return min_addr_; }
+
+ Address raw_max_addr() { return max_addr_; }
+
+ Interval* get_next() { return next_; }
+
+ private:
+ static const size_t MAX_DELTA = 0x100000;
+ Address min_addr_;
+ Address max_addr_;
+ Interval* next_;
+};
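
The Interval list clusters registered addresses: an address within MAX_DELTA (1MB) of a node's current minimum grows that node's range, while anything farther away is delegated to the next node. A compact JavaScript sketch of the same clustering rule, under the same 1MB assumption:

    // Sketch of RegisterAddress: grow the range for nearby addresses,
    // chain a new node for far-away ones (1MB delta, as above).
    var MAX_DELTA = 0x100000;
    function Interval() { this.min = -1; this.max = 0; this.next = null; }
    Interval.prototype.register = function(addr) {
      if (this.min == -1 || Math.abs(addr - this.min) < MAX_DELTA) {
        if (this.min == -1 || addr < this.min) this.min = addr;
        if (addr > this.max) this.max = addr;
      } else {
        if (this.next == null) this.next = new Interval();
        this.next.register(addr);
      }
    };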
+
+
+// A structure used to return log parsing results.
+class ParseLogResult {
+ public:
+ ParseLogResult()
+ : entities_map(NULL), entities(NULL),
+ max_entities(0) {}
+
+ ~ParseLogResult() {
+ i::DeleteArray(entities_map);
+ i::DeleteArray(entities);
+ }
+
+ void AllocateEntities() {
+ // Make sure that the test doesn't operate on a bogus log.
+ CHECK_GT(max_entities, 0);
+ CHECK_GT(bounds.GetMinAddr(), 0);
+ CHECK_GT(bounds.GetMaxAddr(), bounds.GetMinAddr());
+
+ entities = i::NewArray<CodeEntityInfo>(max_entities);
+ for (int i = 0; i < max_entities; ++i) {
+ entities[i] = NULL;
+ }
+ const size_t map_length = bounds.Length();
+ entities_map = i::NewArray<int>(map_length);
+ for (size_t i = 0; i < map_length; ++i) {
+ entities_map[i] = -1;
+ }
+ }
+
+ bool HasIndexForAddress(Address addr) {
+ return bounds.Contains(addr);
+ }
+
+ size_t GetIndexForAddress(Address addr) {
+ CHECK(HasIndexForAddress(addr));
+ return bounds.GetIndex(addr);
+ }
+
+ CodeEntityInfo GetEntity(Address addr) {
+ if (HasIndexForAddress(addr)) {
+ size_t idx = GetIndexForAddress(addr);
+ int item = entities_map[idx];
+ return item != -1 ? entities[item] : NULL;
+ }
+ return NULL;
+ }
+
+ void ParseAddress(char* start) {
+ Address addr =
+ reinterpret_cast<Address>(strtoul(start, NULL, 16)); // NOLINT
+ bounds.RegisterAddress(addr);
+ }
+
+ Address ConsumeAddress(char** start) {
+ char* end_ptr;
+ Address addr =
+ reinterpret_cast<Address>(strtoul(*start, &end_ptr, 16)); // NOLINT
+ CHECK(HasIndexForAddress(addr));
+ *start = end_ptr;
+ return addr;
+ }
+
+ Interval bounds;
+ // Memory map of entity start addresses.
+ int* entities_map;
+ // An array of code entities.
+ CodeEntityInfo* entities;
+ // Maximum number of entities. The actual count can be lower;
+ // empty entity slots point to NULL.
+ int max_entities;
+};
+
+} // namespace
+
+
+typedef void (*ParserBlock)(char* start, char* end, ParseLogResult* result);
+
+static void ParserCycle(
+ char* start, char* end, ParseLogResult* result,
+ ParserBlock block_creation, ParserBlock block_delete,
+ ParserBlock block_move) {
+
+ const char* code_creation = "code-creation,";
+ const char* code_delete = "code-delete,";
+ const char* code_move = "code-move,";
+
+ const char* lazy_compile = "LazyCompile,";
+ const char* script = "Script,";
+ const char* function = "Function,";
+
+ while (start < end) {
+ if (Consume(code_creation, &start)) {
+ if (Consume(lazy_compile, &start)
+ || Consume(script, &start)
+ || Consume(function, &start)) {
+ block_creation(start, end, result);
+ }
+ } else if (Consume(code_delete, &start)) {
+ block_delete(start, end, result);
+ } else if (Consume(code_move, &start)) {
+ block_move(start, end, result);
+ }
+ while (start < end && *start != '\n') ++start;
+ ++start;
+ }
+}
+
+
+static void Pass1CodeCreation(char* start, char* end, ParseLogResult* result) {
+ result->ParseAddress(start);
+ ++result->max_entities;
+}
+
+
+static void Pass1CodeDelete(char* start, char* end, ParseLogResult* result) {
+ result->ParseAddress(start);
+}
+
+
+static void Pass1CodeMove(char* start, char* end, ParseLogResult* result) {
+ result->ParseAddress(start);
+ // Skip old address.
+ while (start < end && *start != ',') ++start;
+ CHECK_GT(end, start);
+ ++start; // Skip ','.
+ result->ParseAddress(start);
+}
+
+
+static void Pass2CodeCreation(char* start, char* end, ParseLogResult* result) {
+ Address addr = result->ConsumeAddress(&start);
+ CHECK_GT(end, start);
+ ++start; // Skip ','.
+
+ size_t idx = result->GetIndexForAddress(addr);
+ result->entities_map[idx] = -1;
+ for (int i = 0; i < result->max_entities; ++i) {
+ // Find an empty slot and fill it.
+ if (result->entities[i] == NULL) {
+ result->entities[i] = start;
+ result->entities_map[idx] = i;
+ break;
+ }
+ }
+ // Make sure that a slot was found.
+ CHECK_GE(result->entities_map[idx], 0);
+}
+
+
+static void Pass2CodeDelete(char* start, char* end, ParseLogResult* result) {
+ Address addr = result->ConsumeAddress(&start);
+ size_t idx = result->GetIndexForAddress(addr);
+ // There can be code deletes that are not related to JS code.
+ if (result->entities_map[idx] >= 0) {
+ result->entities[result->entities_map[idx]] = NULL;
+ result->entities_map[idx] = -1;
+ }
+}
+
+
+static void Pass2CodeMove(char* start, char* end, ParseLogResult* result) {
+ Address from_addr = result->ConsumeAddress(&start);
+ CHECK_GT(end, start);
+ ++start; // Skip ','.
+ Address to_addr = result->ConsumeAddress(&start);
+ CHECK_GT(end, start);
+
+ size_t from_idx = result->GetIndexForAddress(from_addr);
+ size_t to_idx = result->GetIndexForAddress(to_addr);
+ // There can be code moves that are not related to JS code.
+ if (from_idx != to_idx && result->entities_map[from_idx] >= 0) {
+ CHECK_EQ(-1, result->entities_map[to_idx]);
+ result->entities_map[to_idx] = result->entities_map[from_idx];
+ result->entities_map[from_idx] = -1;
+ }
+}
+
+
+static void ParseLog(char* start, char* end, ParseLogResult* result) {
+ // Pass 1: Calculate address boundaries and the entity count.
+ ParserCycle(start, end, result,
+ Pass1CodeCreation, Pass1CodeDelete, Pass1CodeMove);
+
+ printf("min_addr: %p, max_addr: %p, entities: %d\n",
+ result->bounds.GetMinAddr(), result->bounds.GetMaxAddr(),
+ result->max_entities);
+
+ result->AllocateEntities();
+
+ // Pass 2: Fill in code entity data.
+ ParserCycle(start, end, result,
+ Pass2CodeCreation, Pass2CodeDelete, Pass2CodeMove);
+}
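
ParseLog needs two passes because the size of the address-indexed map is only known once all addresses have been seen: pass 1 collects bounds and the entity count, pass 2 fills in the entities. The relevant records have the shape code-creation,<kind>,<addr>,<size>,"<name>". A JavaScript sketch of the same two-pass idea (the sample line is hypothetical):

    // Two-pass sketch mirroring ParseLog: pass 1 collects addresses,
    // pass 2 maps each address to its '<size>,"<name>"' tail.
    var lines = ['code-creation,LazyCompile,0x2905a0c0,179,"foo"'];
    var addrs = [];                       // pass 1: bounds / entity count
    for (var i = 0; i < lines.length; i++) {
      var f = lines[i].split(',');
      if (f[0] == 'code-creation') addrs.push(parseInt(f[2], 16));
    }
    var entities = {};                    // pass 2: fill in entity data
    for (var i = 0; i < lines.length; i++) {
      var f = lines[i].split(',');
      if (f[0] == 'code-creation') {
        entities[parseInt(f[2], 16)] = f.slice(3).join(',');  // '179,"foo"'
      }
    }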
+
+
+static inline void PrintCodeEntityInfo(CodeEntityInfo entity) {
+ const int max_len = 50;
+ if (entity != NULL) {
+ char* eol = strchr(entity, '\n');
+ int len = eol - entity;
+ len = len <= max_len ? len : max_len;
+ printf("%-*.*s ", max_len, len, entity);
+ } else {
+ printf("%*s", max_len + 1, "");
+ }
+}
+
+
+static void PrintCodeEntitiesInfo(
+ bool is_equal, Address addr,
+ CodeEntityInfo l_entity, CodeEntityInfo r_entity) {
+ printf("%c %p ", is_equal ? ' ' : '*', addr);
+ PrintCodeEntityInfo(l_entity);
+ PrintCodeEntityInfo(r_entity);
+ printf("\n");
+}
+
+
+static inline int StrChrLen(const char* s, char c) {
+ return strchr(s, c) - s;
+}
+
+
+static bool AreFuncSizesEqual(CodeEntityInfo ref_s, CodeEntityInfo new_s) {
+ int ref_len = StrChrLen(ref_s, ',');
+ int new_len = StrChrLen(new_s, ',');
+ return ref_len == new_len && strncmp(ref_s, new_s, ref_len) == 0;
+}
+
+
+static bool AreFuncNamesEqual(CodeEntityInfo ref_s, CodeEntityInfo new_s) {
+ // Skip size.
+ ref_s = strchr(ref_s, ',') + 1;
+ new_s = strchr(new_s, ',') + 1;
+ int ref_len = StrChrLen(ref_s, '\n');
+ int new_len = StrChrLen(new_s, '\n');
+ // If the reference entity is anonymous (""), any name in the new entity is OK.
+ if (ref_len == 2) return true;
+ // A special case for ErrorPrototype. Haven't yet figured out why they
+ // are different.
+ const char* error_prototype = "\"ErrorPrototype";
+ if (IsStringEqualTo(error_prototype, ref_s)
+ && IsStringEqualTo(error_prototype, new_s)) {
+ return true;
+ }
+ // Built-in object names also differ between the two logs.
+ const char* built_ins[] = {
+ "\"Boolean\"", "\"Function\"", "\"Number\"",
+ "\"Object\"", "\"Script\"", "\"String\""
+ };
+ for (size_t i = 0; i < sizeof(built_ins) / sizeof(*built_ins); ++i) {
+ if (IsStringEqualTo(built_ins[i], new_s)) {
+ return true;
+ }
+ }
+ return ref_len == new_len && strncmp(ref_s, new_s, ref_len) == 0;
+}
+
+
+static bool AreEntitiesEqual(CodeEntityInfo ref_e, CodeEntityInfo new_e) {
+ if (ref_e == NULL && new_e != NULL) return true;
+ if (ref_e != NULL && new_e != NULL) {
+ return AreFuncSizesEqual(ref_e, new_e) && AreFuncNamesEqual(ref_e, new_e);
+ }
+ if (ref_e != NULL && new_e == NULL) {
+ // args_count entities (argument adapters) are not found by heap traversal,
+ // but they are not needed because they don't contain any code.
+ ref_e = strchr(ref_e, ',') + 1;
+ const char* args_count = "\"args_count:";
+ return IsStringEqualTo(args_count, ref_e);
+ }
+ return false;
+}
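
In short, an address pair is considered equivalent when the heap traversal finds at least what logging recorded; only argument-adapter entries may legitimately be missing from the traversal. The same decision table as a JavaScript predicate, simplified to exact string comparison for illustration:

    // Decision table of AreEntitiesEqual, sketched in JavaScript.
    function entitiesEqual(ref, fresh) {  // entries look like '179,"foo"'
      if (ref == null) return fresh != null;  // traversal may find more
      if (fresh != null) return ref == fresh; // simplified: exact match
      // Only argument adapter entries may be missing from the traversal.
      return ref.indexOf('"args_count:') >= 0;
    }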
+
+
+// Test that logging of code create / move / delete events
+// is equivalent to traversal of a resulting heap.
+TEST(EquivalenceOfLoggingAndTraversal) {
+ // This test needs to be run on a "clean" V8 to ensure that snapshot log
+ // is loaded. This is always true when running via tools/test.py because
+ // it launches a new cctest instance for every test. To make sure that
+ // launching cctest manually also works, please ensure that no tests below
+ // use V8.
+ //
+ // P.S. No, V8 can't be re-initialized after disposal, see include/v8.h.
+ CHECK(!i::V8::IsRunning());
+
+ i::FLAG_logfile = "*";
+ i::FLAG_log = true;
+ i::FLAG_log_code = true;
+
+ // Make sure objects move.
+ bool saved_always_compact = i::FLAG_always_compact;
+ if (!i::FLAG_never_compact) {
+ i::FLAG_always_compact = true;
+ }
+
+ v8::HandleScope scope;
+ v8::Handle<v8::Value> global_object = v8::Handle<v8::Value>();
+ v8::Handle<v8::Context> env = v8::Context::New(
+ 0, v8::Handle<v8::ObjectTemplate>(), global_object);
+ env->Enter();
+
+ // Compile and run a function that creates other functions.
+ CompileAndRunScript(
+ "(function f(obj) {\n"
+ " obj.test =\n"
+ " (function a(j) { return function b() { return j; } })(100);\n"
+ "})(this);");
+ i::Heap::CollectAllGarbage();
+
+ EmbeddedVector<char, 204800> buffer;
+ int log_size;
+ ParseLogResult ref_result;
+
+ // Retrieve the log.
+ {
+ // Make sure that no GCs occur prior to the LogCompiledFunctions call.
+ i::AssertNoAllocation no_alloc;
+
+ log_size = GetLogLines(0, &buffer);
+ CHECK_GT(log_size, 0);
+ CHECK_GT(buffer.length(), log_size);
+
+ // Fill a map of compiled code objects.
+ ParseLog(buffer.start(), buffer.start() + log_size, &ref_result);
+ }
+
+ // Iterate the heap to find compiled functions; this will write to the log.
+ i::Logger::LogCompiledFunctions();
+ char* new_log_start = buffer.start() + log_size;
+ const int new_log_size = Logger::GetLogLines(
+ log_size, new_log_start, buffer.length() - log_size);
+ CHECK_GT(new_log_size, 0);
+ CHECK_GT(buffer.length(), log_size + new_log_size);
+
+ // Fill an equivalent map of compiled code objects.
+ ParseLogResult new_result;
+ ParseLog(new_log_start, new_log_start + new_log_size, &new_result);
+
+ // Test their actual equivalence.
+ Interval combined;
+ combined.CloneFrom(&ref_result.bounds);
+ combined.CloneFrom(&new_result.bounds);
+ Interval* iter = &combined;
+ bool results_equal = true;
+
+ while (iter != NULL) {
+ for (Address addr = iter->raw_min_addr();
+ addr <= iter->raw_max_addr(); ++addr) {
+ CodeEntityInfo ref_entity = ref_result.GetEntity(addr);
+ CodeEntityInfo new_entity = new_result.GetEntity(addr);
+ if (ref_entity != NULL || new_entity != NULL) {
+ const bool equal = AreEntitiesEqual(ref_entity, new_entity);
+ if (!equal) results_equal = false;
+ PrintCodeEntitiesInfo(equal, addr, ref_entity, new_entity);
+ }
+ }
+ iter = iter->get_next();
+ }
+ // Make sure that all log data is written before a possible CHECK-failure crash.
+ fflush(stdout);
+ CHECK(results_equal);
+
+ env->Exit();
+ Logger::TearDown();
+ i::FLAG_always_compact = saved_always_compact;
+}
+
#endif // ENABLE_LOGGING_AND_PROFILING
diff --git a/deps/v8/test/cctest/test-utils.cc b/deps/v8/test/cctest/test-utils.cc
index acb5d3ca6..23b3254c2 100644
--- a/deps/v8/test/cctest/test-utils.cc
+++ b/deps/v8/test/cctest/test-utils.cc
@@ -152,6 +152,13 @@ TEST(Utils1) {
CHECK_EQ(0, FastD2I(0.345));
CHECK_EQ(1, FastD2I(1.234));
CHECK_EQ(1000000, FastD2I(1000000.123));
+ // Check that >> is implemented as arithmetic shift right.
+ // If this is not true, then ArithmeticShiftRight() must be changed.
+ // There are also documented right shifts in assembler.cc of
+ // int8_t and intptr_t signed integers.
+ CHECK_EQ(-2, -8 >> 2);
+ CHECK_EQ(-2, static_cast<int8_t>(-8) >> 2);
+ CHECK_EQ(-2, static_cast<intptr_t>(-8) >> 2);
}
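
The C++ standard leaves right-shifting negative signed integers implementation-defined, which is why the checks above pin the behavior down. JavaScript, by contrast, defines >> as a 32-bit arithmetic shift. For comparison:

    // In JavaScript '>>' is always a 32-bit arithmetic (sign-extending)
    // shift, matching what the C++ CHECKs above require of the compiler.
    print(-8 >> 2);   // -2: the sign bit is replicated
    print(-8 >>> 2);  // 1073741822: '>>>' is the logical shift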
diff --git a/deps/v8/test/cctest/test-version.cc b/deps/v8/test/cctest/test-version.cc
index 0b93fbf8c..6d2685596 100644
--- a/deps/v8/test/cctest/test-version.cc
+++ b/deps/v8/test/cctest/test-version.cc
@@ -33,7 +33,8 @@
using namespace v8::internal;
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
void SetVersion(int major, int minor, int build, int patch,
bool candidate, const char* soname) {
diff --git a/deps/v8/test/mjsunit/codegen_coverage.js b/deps/v8/test/mjsunit/codegen-coverage.js
index d5e7769d7..d5e7769d7 100644
--- a/deps/v8/test/mjsunit/codegen_coverage.js
+++ b/deps/v8/test/mjsunit/codegen-coverage.js
diff --git a/deps/v8/test/mjsunit/debug-backtrace.js b/deps/v8/test/mjsunit/debug-backtrace.js
index f08f6390e..1d2bb9af9 100644
--- a/deps/v8/test/mjsunit/debug-backtrace.js
+++ b/deps/v8/test/mjsunit/debug-backtrace.js
@@ -37,7 +37,7 @@ var m = function() {
};
function g() {
- m();
+ m();
};
@@ -80,8 +80,9 @@ function listener(event, exec_state, event_data, data) {
{
// The expected backtrace is
// 0: f
- // 1: g
- // 2: [anonymous]
+ // 1: m
+ // 2: g
+ // 3: [anonymous]
var response;
var backtrace;
@@ -133,6 +134,23 @@ function listener(event, exec_state, event_data, data) {
assertEquals(2, frames[1].index);
assertEquals("g", response.lookup(frames[1].func.ref).name);
+ // Get backtrace with bottom two frames.
+ json = '{"seq":0,"type":"request","command":"backtrace","arguments":{"fromFrame":0,"toFrame":2, "bottom":true}}'
+ response = new ParsedResponse(dcp.processDebugJSONRequest(json));
+ backtrace = response.body();
+ assertEquals(2, backtrace.fromFrame);
+ assertEquals(4, backtrace.toFrame);
+ assertEquals(4, backtrace.totalFrames);
+ var frames = backtrace.frames;
+ assertEquals(2, frames.length);
+ for (var i = 0; i < frames.length; i++) {
+ assertEquals('frame', frames[i].type);
+ }
+ assertEquals(2, frames[0].index);
+ assertEquals("g", response.lookup(frames[0].func.ref).name);
+ assertEquals(3, frames[1].index);
+ assertEquals("", response.lookup(frames[1].func.ref).name);
+
// Get the individual frames.
json = '{"seq":0,"type":"request","command":"frame"}'
response = new ParsedResponse(dcp.processDebugJSONRequest(json));
diff --git a/deps/v8/test/mjsunit/debug-compile-event.js b/deps/v8/test/mjsunit/debug-compile-event.js
index 035e36c30..c346f76e8 100644
--- a/deps/v8/test/mjsunit/debug-compile-event.js
+++ b/deps/v8/test/mjsunit/debug-compile-event.js
@@ -32,8 +32,11 @@ Debug = debug.Debug
var exception = false; // Exception in debug event listener.
var before_compile_count = 0;
var after_compile_count = 0;
-var current_source = ''; // Current source compiled.
-var source_count = 0; // Total number of scource sompiled.
+var current_source = ''; // Current source being compiled.
+var source_count = 0; // Total number of sources compiled.
+var host_compilations = 0; // Number of sources compiled through the API.
+var eval_compilations = 0; // Number of sources compiled through eval.
+var json_compilations = 0; // Number of sources compiled through JSON.parse.
function compileSource(source) {
@@ -52,19 +55,41 @@ function listener(event, exec_state, event_data, data) {
before_compile_count++;
} else {
after_compile_count++;
+ switch (event_data.script().compilationType()) {
+ case Debug.ScriptCompilationType.Host:
+ host_compilations++;
+ break;
+ case Debug.ScriptCompilationType.Eval:
+ eval_compilations++;
+ break;
+ case Debug.ScriptCompilationType.JSON:
+ json_compilations++;
+ break;
+ }
}
-
+
// If the compiled source contains 'eval' there will be additional compile
// events for the source inside eval.
if (current_source.indexOf('eval') == 0) {
// For source with 'eval' there will be compile events with substrings
// as well as with the exact source.
assertTrue(current_source.indexOf(event_data.script().source()) >= 0);
+ } else if (current_source.indexOf('JSON.parse') == 0) {
+ // For JSON scripts the JSON source will be in parentheses.
+ var s = event_data.script().source();
+ if (s[0] == '(') {
+ s = s.substring(1, s.length - 2);
+ }
+ assertTrue(current_source.indexOf(s) >= 0);
} else {
// For source without 'eval' there will be compile events with the
// exact source.
assertEquals(current_source, event_data.script().source());
}
+ // Check that the script context is included in the event message.
+ var json = event_data.toJSONProtocol();
+ var msg = eval('(' + json + ')');
+ assertTrue('context' in msg.body.script);
}
} catch (e) {
exception = e
@@ -82,6 +107,8 @@ compileSource('eval("a=2")');
source_count++; // Using eval causes additional compilation event.
compileSource('eval("eval(\'function(){return a;}\')")');
source_count += 2; // Using eval causes additional compilation event.
+compileSource('JSON.parse("{a:1,b:2}")');
+source_count++; // Using JSON.parse causes an additional compilation event.
// Make sure that the debug event listener was invoked.
assertFalse(exception, "exception in listener")
@@ -89,7 +116,11 @@ assertFalse(exception, "exception in listener")
// Number of before and after compile events should be the same.
assertEquals(before_compile_count, after_compile_count);
-// Check the actual number of events.
+// Check the actual number of events (no compilation through the API; all
+// sources are compiled through eval except for one JSON.parse call).
assertEquals(source_count, after_compile_count);
+assertEquals(0, host_compilations);
+assertEquals(source_count - 1, eval_compilations);
+assertEquals(1, json_compilations);
Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/debug-references.js b/deps/v8/test/mjsunit/debug-references.js
index dedc3eff4..1fde1ac74 100644
--- a/deps/v8/test/mjsunit/debug-references.js
+++ b/deps/v8/test/mjsunit/debug-references.js
@@ -113,6 +113,6 @@ q = new Point(1,2);
// Enter debugger causing the event listener to be called.
debugger;
-// Make sure that the debug event listener vas invoked.
+// Make sure that the debug event listener was invoked.
assertFalse(exception, "exception in listener")
assertTrue(listenerComplete, "listener did not run to completion");
diff --git a/deps/v8/test/mjsunit/debug-scripts-request.js b/deps/v8/test/mjsunit/debug-scripts-request.js
index cf1615bb7..80b3bce59 100644
--- a/deps/v8/test/mjsunit/debug-scripts-request.js
+++ b/deps/v8/test/mjsunit/debug-scripts-request.js
@@ -66,9 +66,6 @@ function listener(event, exec_state, event_data, data) {
testArguments(dcp, '{"types":"xx"}', false);
// Test legal scripts requests.
- var request = '{' + base_request + '}'
- var response = safeEval(dcp.processDebugJSONRequest(request));
- assertTrue(response.success);
testArguments(dcp, '{}', true);
testArguments(dcp, '{"types":1}', true);
testArguments(dcp, '{"types":2}', true);
@@ -76,6 +73,21 @@ function listener(event, exec_state, event_data, data) {
testArguments(dcp, '{"types":7}', true);
testArguments(dcp, '{"types":0xFF}', true);
+ // Test request for all scripts.
+ var request = '{' + base_request + '}'
+ var response = safeEval(dcp.processDebugJSONRequest(request));
+ assertTrue(response.success);
+
+ // Test filtering by id.
+ assertEquals(2, response.body.length);
+ var script = response.body[0];
+ var request = '{' + base_request + ',"arguments":{"ids":[' +
+ script.id + ']}}';
+ var response = safeEval(dcp.processDebugJSONRequest(request));
+ assertTrue(response.success);
+ assertEquals(1, response.body.length);
+ assertEquals(script.id, response.body[0].id);
+
// Indicate that all was processed.
listenerComplete = true;
}
@@ -91,5 +103,6 @@ Debug.setListener(listener);
debugger;
// Make sure that the debug event listener was invoked with no exceptions.
-assertTrue(listenerComplete, "listener did not run to completion");
+assertTrue(listenerComplete,
+ "listener did not run to completion, exception: " + exception);
assertFalse(exception, "exception in listener")
diff --git a/deps/v8/test/mjsunit/mirror-array.js b/deps/v8/test/mjsunit/mirror-array.js
index 1873d1eb6..eb8f72a8c 100644
--- a/deps/v8/test/mjsunit/mirror-array.js
+++ b/deps/v8/test/mjsunit/mirror-array.js
@@ -44,8 +44,9 @@ function testArrayMirror(a, names) {
// Create mirror and JSON representation.
var mirror = debug.MakeMirror(a);
var serializer = debug.MakeMirrorSerializer();
- var json = serializer.serializeValue(mirror);
- var refs = new MirrorRefCache(serializer.serializeReferencedObjects());
+ var json = JSON.stringify(serializer.serializeValue(mirror));
+ var refs = new MirrorRefCache(
+ JSON.stringify(serializer.serializeReferencedObjects()));
// Check the mirror hierachy.
assertTrue(mirror instanceof debug.Mirror, 'Unexpected mirror hierachy');
diff --git a/deps/v8/test/mjsunit/mirror-boolean.js b/deps/v8/test/mjsunit/mirror-boolean.js
index 4f9308924..311c78176 100644
--- a/deps/v8/test/mjsunit/mirror-boolean.js
+++ b/deps/v8/test/mjsunit/mirror-boolean.js
@@ -32,7 +32,7 @@ function testBooleanMirror(b) {
// Create mirror and JSON representation.
var mirror = debug.MakeMirror(b);
var serializer = debug.MakeMirrorSerializer();
- var json = serializer.serializeValue(mirror);
+ var json = JSON.stringify(serializer.serializeValue(mirror));
// Check the mirror hierachy.
assertTrue(mirror instanceof debug.Mirror);
diff --git a/deps/v8/test/mjsunit/mirror-date.js b/deps/v8/test/mjsunit/mirror-date.js
index a6334d0f1..6b6a3ad40 100644
--- a/deps/v8/test/mjsunit/mirror-date.js
+++ b/deps/v8/test/mjsunit/mirror-date.js
@@ -32,7 +32,7 @@ function testDateMirror(d, iso8601) {
// Create mirror and JSON representation.
var mirror = debug.MakeMirror(d);
var serializer = debug.MakeMirrorSerializer();
- var json = serializer.serializeValue(mirror);
+ var json = JSON.stringify(serializer.serializeValue(mirror));
// Check the mirror hierachy.
assertTrue(mirror instanceof debug.Mirror);
@@ -55,9 +55,9 @@ function testDateMirror(d, iso8601) {
assertEquals(iso8601, fromJSON.value);
}
-
// Test Date values.
-testDateMirror(new Date(Date.parse("Dec 25, 1995 1:30 UTC")), "1995-12-25T01:30:00.000Z");
+testDateMirror(new Date(Date.parse("Dec 25, 1995 1:30 UTC")),
+ "1995-12-25T01:30:00Z");
d = new Date();
d.setUTCFullYear(1967);
d.setUTCMonth(0); // January.
@@ -66,10 +66,10 @@ d.setUTCHours(9);
d.setUTCMinutes(22);
d.setUTCSeconds(59);
d.setUTCMilliseconds(0);
-testDateMirror(d, "1967-01-17T09:22:59.000Z");
+testDateMirror(d, "1967-01-17T09:22:59Z");
d.setUTCMilliseconds(1);
-testDateMirror(d, "1967-01-17T09:22:59.001Z");
-d.setUTCMilliseconds(12);
-testDateMirror(d, "1967-01-17T09:22:59.012Z");
-d.setUTCMilliseconds(123);
-testDateMirror(d, "1967-01-17T09:22:59.123Z");
+testDateMirror(d, "1967-01-17T09:22:59Z");
+d.setUTCSeconds(12);
+testDateMirror(d, "1967-01-17T09:22:12Z");
+d.setUTCSeconds(36);
+testDateMirror(d, "1967-01-17T09:22:36Z");
diff --git a/deps/v8/test/mjsunit/mirror-error.js b/deps/v8/test/mjsunit/mirror-error.js
index 37ec46c86..4ed8c1b42 100644
--- a/deps/v8/test/mjsunit/mirror-error.js
+++ b/deps/v8/test/mjsunit/mirror-error.js
@@ -44,8 +44,9 @@ function testErrorMirror(e) {
// Create mirror and JSON representation.
var mirror = debug.MakeMirror(e);
var serializer = debug.MakeMirrorSerializer();
- var json = serializer.serializeValue(mirror);
- var refs = new MirrorRefCache(serializer.serializeReferencedObjects());
+ var json = JSON.stringify(serializer.serializeValue(mirror));
+ var refs = new MirrorRefCache(
+ JSON.stringify(serializer.serializeReferencedObjects()));
// Check the mirror hierachy.
assertTrue(mirror instanceof debug.Mirror);
diff --git a/deps/v8/test/mjsunit/mirror-function.js b/deps/v8/test/mjsunit/mirror-function.js
index 59d9e7862..58aee3dae 100644
--- a/deps/v8/test/mjsunit/mirror-function.js
+++ b/deps/v8/test/mjsunit/mirror-function.js
@@ -44,8 +44,9 @@ function testFunctionMirror(f) {
// Create mirror and JSON representation.
var mirror = debug.MakeMirror(f);
var serializer = debug.MakeMirrorSerializer();
- var json = serializer.serializeValue(mirror);
- var refs = new MirrorRefCache(serializer.serializeReferencedObjects());
+ var json = JSON.stringify(serializer.serializeValue(mirror));
+ var refs = new MirrorRefCache(
+ JSON.stringify(serializer.serializeReferencedObjects()));
// Check the mirror hierachy.
assertTrue(mirror instanceof debug.Mirror);
diff --git a/deps/v8/test/mjsunit/mirror-null.js b/deps/v8/test/mjsunit/mirror-null.js
index db68966a4..1ee555b7b 100644
--- a/deps/v8/test/mjsunit/mirror-null.js
+++ b/deps/v8/test/mjsunit/mirror-null.js
@@ -31,7 +31,7 @@
// Create mirror and JSON representation.
var mirror = debug.MakeMirror(null);
var serializer = debug.MakeMirrorSerializer();
-var json = serializer.serializeValue(mirror);
+var json = JSON.stringify(serializer.serializeValue(mirror));
// Check the mirror hierachy.
assertTrue(mirror instanceof debug.Mirror);
diff --git a/deps/v8/test/mjsunit/mirror-number.js b/deps/v8/test/mjsunit/mirror-number.js
index 68eb0d7e7..2db5df439 100644
--- a/deps/v8/test/mjsunit/mirror-number.js
+++ b/deps/v8/test/mjsunit/mirror-number.js
@@ -32,7 +32,7 @@ function testNumberMirror(n) {
// Create mirror and JSON representation.
var mirror = debug.MakeMirror(n);
var serializer = debug.MakeMirrorSerializer();
- var json = serializer.serializeValue(mirror);
+ var json = JSON.stringify(serializer.serializeValue(mirror));
// Check the mirror hierachy.
assertTrue(mirror instanceof debug.Mirror);
diff --git a/deps/v8/test/mjsunit/mirror-object.js b/deps/v8/test/mjsunit/mirror-object.js
index e829a0e22..ad7add8ae 100644
--- a/deps/v8/test/mjsunit/mirror-object.js
+++ b/deps/v8/test/mjsunit/mirror-object.js
@@ -44,8 +44,9 @@ function testObjectMirror(obj, cls_name, ctor_name, hasSpecialProperties) {
// Create mirror and JSON representation.
var mirror = debug.MakeMirror(obj);
var serializer = debug.MakeMirrorSerializer();
- var json = serializer.serializeValue(mirror);
- var refs = new MirrorRefCache(serializer.serializeReferencedObjects());
+ var json = JSON.stringify(serializer.serializeValue(mirror));
+ var refs = new MirrorRefCache(
+ JSON.stringify(serializer.serializeReferencedObjects()));
// Check the mirror hierachy.
assertTrue(mirror instanceof debug.Mirror, 'Unexpected mirror hierachy');
@@ -105,7 +106,7 @@ function testObjectMirror(obj, cls_name, ctor_name, hasSpecialProperties) {
assertEquals(names.length, fromJSON.properties.length, 'Some properties missing in JSON');
for (var i = 0; i < fromJSON.properties.length; i++) {
var name = fromJSON.properties[i].name;
- if (!name) name = fromJSON.properties[i].index;
+ if (typeof name == 'undefined') name = fromJSON.properties[i].index;
var found = false;
for (var j = 0; j < names.length; j++) {
if (names[j] == name) {
@@ -157,7 +158,6 @@ function Point(x,y) {
this.y_ = y;
}
-
// Test a number of different objects.
testObjectMirror({}, 'Object', 'Object');
testObjectMirror({'a':1,'b':2}, 'Object', 'Object');
diff --git a/deps/v8/test/mjsunit/mirror-regexp.js b/deps/v8/test/mjsunit/mirror-regexp.js
index 0490b17f0..8c834bf38 100644
--- a/deps/v8/test/mjsunit/mirror-regexp.js
+++ b/deps/v8/test/mjsunit/mirror-regexp.js
@@ -55,8 +55,9 @@ function testRegExpMirror(r) {
// Create mirror and JSON representation.
var mirror = debug.MakeMirror(r);
var serializer = debug.MakeMirrorSerializer();
- var json = serializer.serializeValue(mirror);
- var refs = new MirrorRefCache(serializer.serializeReferencedObjects());
+ var json = JSON.stringify(serializer.serializeValue(mirror));
+ var refs = new MirrorRefCache(
+ JSON.stringify(serializer.serializeReferencedObjects()));
// Check the mirror hierachy.
assertTrue(mirror instanceof debug.Mirror);
diff --git a/deps/v8/test/mjsunit/mirror-script.js b/deps/v8/test/mjsunit/mirror-script.js
index 61f0f3aa6..9b67b9ba0 100644
--- a/deps/v8/test/mjsunit/mirror-script.js
+++ b/deps/v8/test/mjsunit/mirror-script.js
@@ -25,14 +25,15 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
+// Flags: --expose-debug-as debug --allow-natives-syntax
// Test the mirror object for scripts.
-function testScriptMirror(f, file_name, file_lines, script_type, script_source) {
+function testScriptMirror(f, file_name, file_lines, type, compilation_type,
+ source, eval_from_line) {
// Create mirror and JSON representation.
var mirror = debug.MakeMirror(f).script();
var serializer = debug.MakeMirrorSerializer();
- var json = serializer.serializeValue(mirror);
+ var json = JSON.stringify(serializer.serializeValue(mirror));
// Check the mirror hierachy.
assertTrue(mirror instanceof debug.Mirror);
@@ -53,13 +54,17 @@ function testScriptMirror(f, file_name, file_lines, script_type, script_source)
if (file_lines > 0) {
assertEquals(file_lines, mirror.lineCount());
}
- assertEquals(script_type, mirror.scriptType());
- if (script_source) {
- assertEquals(script_source, mirror.source());
+ assertEquals(type, mirror.scriptType());
+ assertEquals(compilation_type, mirror.compilationType(), "compilation type");
+ if (source) {
+ assertEquals(source, mirror.source());
+ }
+ if (eval_from_line) {
+ assertEquals(eval_from_line, mirror.evalFromLocation().line);
}
// Parse JSON representation and check.
- var fromJSON = eval('(' + json + ')');
+ var fromJSON = JSON.parse(json);
assertEquals('script', fromJSON.type);
name = fromJSON.name;
if (name) {
@@ -72,15 +77,18 @@ function testScriptMirror(f, file_name, file_lines, script_type, script_source)
if (file_lines > 0) {
assertEquals(file_lines, fromJSON.lineCount);
}
- assertEquals(script_type, fromJSON.scriptType);
+ assertEquals(type, fromJSON.scriptType);
+ assertEquals(compilation_type, fromJSON.compilationType);
}
// Test the script mirror for different functions.
-testScriptMirror(function(){}, 'mirror-script.js', 92, 2);
-testScriptMirror(Math.sin, 'native math.js', -1, 0);
-testScriptMirror(eval('function(){}'), null, 1, 2, 'function(){}');
-testScriptMirror(eval('function(){\n }'), null, 2, 2, 'function(){\n }');
+testScriptMirror(function(){}, 'mirror-script.js', 100, 2, 0);
+testScriptMirror(Math.sin, 'native math.js', -1, 0, 0);
+testScriptMirror(eval('function(){}'), null, 1, 2, 1, 'function(){}', 87);
+testScriptMirror(eval('function(){\n }'), null, 2, 2, 1, 'function(){\n }', 88);
+testScriptMirror(%CompileString("({a:1,b:2})", true), null, 1, 2, 2, '({a:1,b:2})');
+testScriptMirror(%CompileString("({a:1,\n b:2})", true), null, 2, 2, 2, '({a:1,\n b:2})');
// Test taking slices of source.
var mirror = debug.MakeMirror(eval('function(){\n 1;\n}')).script();
diff --git a/deps/v8/test/mjsunit/mirror-string.js b/deps/v8/test/mjsunit/mirror-string.js
index eeabc5fe2..c241849d8 100644
--- a/deps/v8/test/mjsunit/mirror-string.js
+++ b/deps/v8/test/mjsunit/mirror-string.js
@@ -34,7 +34,7 @@ function testStringMirror(s) {
// Create mirror and JSON representation.
var mirror = debug.MakeMirror(s);
var serializer = debug.MakeMirrorSerializer();
- var json = serializer.serializeValue(mirror);
+ var json = JSON.stringify(serializer.serializeValue(mirror));
// Check the mirror hierachy.
assertTrue(mirror instanceof debug.Mirror);
diff --git a/deps/v8/test/mjsunit/mirror-undefined.js b/deps/v8/test/mjsunit/mirror-undefined.js
index 2b5b84c6a..7f63239e5 100644
--- a/deps/v8/test/mjsunit/mirror-undefined.js
+++ b/deps/v8/test/mjsunit/mirror-undefined.js
@@ -31,7 +31,7 @@
// Create mirror and JSON representation.
var mirror = debug.MakeMirror(void 0);
var serializer = debug.MakeMirrorSerializer();
-var json = serializer.serializeValue(mirror);
+var json = JSON.stringify(serializer.serializeValue(mirror));
// Check the mirror hierachy.
assertTrue(mirror instanceof debug.Mirror);
diff --git a/deps/v8/test/mjsunit/mirror-unresolved-function.js b/deps/v8/test/mjsunit/mirror-unresolved-function.js
index 8d8ca37f8..c1fe4a3ef 100644
--- a/deps/v8/test/mjsunit/mirror-unresolved-function.js
+++ b/deps/v8/test/mjsunit/mirror-unresolved-function.js
@@ -42,8 +42,9 @@ MirrorRefCache.prototype.lookup = function(handle) {
var mirror = new debug.UnresolvedFunctionMirror("f");
var serializer = debug.MakeMirrorSerializer();
-var json = serializer.serializeValue(mirror);
-var refs = new MirrorRefCache(serializer.serializeReferencedObjects());
+var json = JSON.stringify(serializer.serializeValue(mirror));
+var refs = new MirrorRefCache(
+ JSON.stringify(serializer.serializeReferencedObjects()));
// Check the mirror hierachy for unresolved functions.
assertTrue(mirror instanceof debug.Mirror);
diff --git a/deps/v8/test/mjsunit/regexp.js b/deps/v8/test/mjsunit/regexp.js
index e562df090..0a23d00ac 100644
--- a/deps/v8/test/mjsunit/regexp.js
+++ b/deps/v8/test/mjsunit/regexp.js
@@ -375,3 +375,16 @@ assertFalse(/x([0-7]%%%x|[0-6]%%%y)/.test('x7%%%y'), 'qt8');
// Don't hang on this one.
/[^\xfe-\xff]*/.test("");
+
+
+var long = "a";
+for (var i = 0; i < 100000; i++) {
+ long = "a?" + long;
+}
+// Don't crash on this one, but maybe throw an exception.
+try {
+ RegExp(long).exec("a");
+} catch (e) {
+ assertTrue(String(e).indexOf("Stack overflow") >= 0, "overflow");
+}
+
diff --git a/deps/v8/test/mjsunit/bugs/bug-334.js b/deps/v8/test/mjsunit/regress/regress-334.js
index 024fc9e85..024fc9e85 100644
--- a/deps/v8/test/mjsunit/bugs/bug-334.js
+++ b/deps/v8/test/mjsunit/regress/regress-334.js
diff --git a/deps/v8/test/mjsunit/regress/regress-341.js b/deps/v8/test/mjsunit/regress/regress-341.js
new file mode 100644
index 000000000..4db6bc696
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-341.js
@@ -0,0 +1,36 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Should not crash.
+// See http://code.google.com/p/v8/issues/detail?id=341
+
+function F() {}
+
+F.prototype = 1;
+var o = {};
+
+assertThrows("o instanceof F");
diff --git a/deps/v8/test/mjsunit/regress/regress-349.js b/deps/v8/test/mjsunit/regress/regress-349.js
new file mode 100644
index 000000000..1a60e3e12
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-349.js
@@ -0,0 +1,32 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Should not crash.
+// See http://code.google.com/p/v8/issues/detail?id=349
+
+var str = "bbaabbbbbbbbabbaaaabbaaabbbaaaabbaaabbabaaabb";
+assertEquals(str, str.replace(/aabab/g, "foo"));
diff --git a/deps/v8/test/mjsunit/regress/regress-351.js b/deps/v8/test/mjsunit/regress/regress-351.js
new file mode 100644
index 000000000..44470db32
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-351.js
@@ -0,0 +1,31 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Should use an index of 0 if the provided index is negative.
+// See http://code.google.com/p/v8/issues/detail?id=351
+
+assertEquals(0, "test".lastIndexOf("test", -1));
diff --git a/deps/v8/test/mjsunit/string-lastindexof.js b/deps/v8/test/mjsunit/string-lastindexof.js
index bf46666ee..27378f789 100644
--- a/deps/v8/test/mjsunit/string-lastindexof.js
+++ b/deps/v8/test/mjsunit/string-lastindexof.js
@@ -27,25 +27,62 @@
var s = "test test test";
-assertEquals(5, s.lastIndexOf("test", 5));
-assertEquals(5, s.lastIndexOf("test", 6));
-assertEquals(0, s.lastIndexOf("test", 4));
-assertEquals(0, s.lastIndexOf("test", 0));
-assertEquals(-1, s.lastIndexOf("test", -1));
-assertEquals(10, s.lastIndexOf("test"));
-assertEquals(-1, s.lastIndexOf("notpresent"));
-assertEquals(-1, s.lastIndexOf());
-assertEquals(10, s.lastIndexOf("test", "not a number"));
+var MAX_DOUBLE = 1.7976931348623157e+308;
+var MIN_DOUBLE = -MAX_DOUBLE;
+var MAX_SMI = Math.pow(2,30)-1;
+var MIN_SMI = -Math.pow(2,30);
+
+assertEquals(10, s.lastIndexOf("test", Infinity), "tinf");
+assertEquals(10, s.lastIndexOf("test", MAX_DOUBLE), "tmaxdouble");
+assertEquals(10, s.lastIndexOf("test", MAX_SMI), "tmaxsmi");
+assertEquals(10, s.lastIndexOf("test", s.length * 2), "t2length");
+assertEquals(10, s.lastIndexOf("test", 15), "t15");
+assertEquals(10, s.lastIndexOf("test", 14), "t14");
+assertEquals(10, s.lastIndexOf("test", 10), "t10");
+assertEquals(5, s.lastIndexOf("test", 9), "t9");
+assertEquals(5, s.lastIndexOf("test", 6), "t6");
+assertEquals(5, s.lastIndexOf("test", 5), "t5");
+assertEquals(0, s.lastIndexOf("test", 4), "t4");
+assertEquals(0, s.lastIndexOf("test", 0), "t0");
+assertEquals(0, s.lastIndexOf("test", -1), "t-1");
+assertEquals(0, s.lastIndexOf("test", -s.length), "t-len");
+assertEquals(0, s.lastIndexOf("test", MIN_SMI), "tminsmi");
+assertEquals(0, s.lastIndexOf("test", MIN_DOUBLE), "tmindouble");
+assertEquals(0, s.lastIndexOf("test", -Infinity), "tneginf");
+assertEquals(10, s.lastIndexOf("test"), "t");
+assertEquals(-1, s.lastIndexOf("notpresent"), "n");
+assertEquals(-1, s.lastIndexOf(), "none");
+assertEquals(10, s.lastIndexOf("test", "not a number"), "nan");
+
+var longNonMatch = "overlong string that doesn't match";
+var longAlmostMatch = "test test test!";
+var longAlmostMatch2 = "!test test test";
+
+
+assertEquals(-1, s.lastIndexOf(longNonMatch), "long");
+assertEquals(-1, s.lastIndexOf(longNonMatch, 10), "longpos");
+assertEquals(-1, s.lastIndexOf(longNonMatch, NaN), "longnan");
+assertEquals(-1, s.lastIndexOf(longAlmostMatch), "tlong");
+assertEquals(-1, s.lastIndexOf(longAlmostMatch, 10), "tlongpos");
+assertEquals(-1, s.lastIndexOf(longAlmostMatch, NaN), "tlongnan");
+
+var nonInitialMatch = "est";
+
+assertEquals(-1, s.lastIndexOf(nonInitialMatch, 0), "noninit");
+assertEquals(-1, s.lastIndexOf(nonInitialMatch, -1), "noninitneg");
+assertEquals(-1, s.lastIndexOf(nonInitialMatch, MIN_SMI), "noninitminsmi");
+assertEquals(-1, s.lastIndexOf(nonInitialMatch, MIN_DOUBLE), "noninitmindbl");
+assertEquals(-1, s.lastIndexOf(nonInitialMatch, -Infinity), "noninitneginf");
for (var i = s.length + 10; i >= 0; i--) {
var expected = i < s.length ? i : s.length;
- assertEquals(expected, s.lastIndexOf("", i));
+ assertEquals(expected, s.lastIndexOf("", i), "empty" + i);
}
var reString = "asdf[a-z]+(asdf)?";
-assertEquals(4, reString.lastIndexOf("[a-z]+"));
-assertEquals(10, reString.lastIndexOf("(asdf)?"));
+assertEquals(4, reString.lastIndexOf("[a-z]+"), "r4");
+assertEquals(10, reString.lastIndexOf("(asdf)?"), "r10");
-assertEquals(1, String.prototype.lastIndexOf.length);
+assertEquals(1, String.prototype.lastIndexOf.length, "length");
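
The new assertions pin down how the position argument of String.prototype.lastIndexOf is handled: it goes through ToNumber, NaN means "search the whole string", and everything else is clamped to [0, length]. A small sketch of that clamping rule as the tests above assume it:

    // Clamping of the lastIndexOf position argument, per the behavior
    // exercised above (NaN searches the whole string).
    function clampPosition(pos, length) {
      var n = Number(pos);
      if (isNaN(n)) return length;
      return Math.min(Math.max(n, 0), length);
    }
    print(clampPosition('not a number', 14));  // 14
    print(clampPosition(-Infinity, 14));       // 0, so "t-1" now expects 0
    print(clampPosition(9, 14));               // 9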
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index e980ff167..66e1bb66d 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -2,7 +2,7 @@
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
-#
+#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
@@ -12,7 +12,7 @@
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
-#
+#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -29,6 +29,7 @@
'variables': {
'chromium_code': 1,
'msvs_use_common_release': 0,
+ 'gcc_version%': 'unknown',
'base_source_files': [
'../../src/arm/assembler-arm-inl.h',
'../../src/arm/assembler-arm.cc',
@@ -135,6 +136,7 @@
'../../src/frames-inl.h',
'../../src/frames.cc',
'../../src/frames.h',
+ '../../src/frame-element.h',
'../../src/func-name-inferrer.cc',
'../../src/func-name-inferrer.h',
'../../src/global-handles.cc',
@@ -155,6 +157,7 @@
'../../src/interpreter-irregexp.h',
'../../src/jump-target.cc',
'../../src/jump-target.h',
+ '../../src/jump-target-inl.h',
'../../src/jsregexp-inl.h',
'../../src/jsregexp.cc',
'../../src/jsregexp.h',
@@ -162,6 +165,8 @@
'../../src/list.h',
'../../src/log.cc',
'../../src/log.h',
+ '../../src/log-utils.cc',
+ '../../src/log-utils.h',
'../../src/macro-assembler.h',
'../../src/mark-compact.cc',
'../../src/mark-compact.h',
@@ -297,6 +302,16 @@
'-fomit-frame-pointer',
'-O3',
],
+ 'conditions': [
+ [ 'gcc_version=="44"', {
+ 'cflags': [
+ # Avoid gcc 4.4 strict aliasing issues in dtoa.c
+ '-fno-strict-aliasing',
+ # Avoid crashes with gcc 4.4 in the v8 test suite.
+ '-fno-tree-vrp',
+ ],
+ }],
+ ],
'cflags_cc': [
'-fno-rtti',
],
diff --git a/deps/v8/tools/linux-tick-processor b/deps/v8/tools/linux-tick-processor
index 0ad295f14..968c24129 100644
--- a/deps/v8/tools/linux-tick-processor
+++ b/deps/v8/tools/linux-tick-processor
@@ -1,16 +1,15 @@
#!/bin/sh
-tools_dir=$(dirname "$0")
+tools_path=`cd $(dirname "$0");pwd`
+[ "$D8_PATH" ] || D8_PATH=$tools_path/..
+d8_exec=$D8_PATH/d8
-if [ "$1" != "--no-build" ]
-then
- scons -C $tools_dir/.. d8
-else
- shift
-fi
+# compile d8 if it doesn't exist, assuming this script
+# resides in the repository.
+[ -x $d8_exec ] || scons -j4 -C $D8_PATH -Y $tools_path/.. d8
-# nm spits out 'no symbols found' messages on stderr
-$tools_dir/../d8 $tools_dir/splaytree.js $tools_dir/codemap.js \
- $tools_dir/csvparser.js $tools_dir/consarray.js \
- $tools_dir/profile.js $tools_dir/profile_view.js \
- $tools_dir/tickprocessor.js -- $@ 2>/dev/null
+# nm spits out 'no symbols found' messages to stderr.
+$d8_exec $tools_path/splaytree.js $tools_path/codemap.js \
+ $tools_path/csvparser.js $tools_path/consarray.js \
+ $tools_path/profile.js $tools_path/profile_view.js \
+ $tools_path/tickprocessor.js -- $@ 2>/dev/null
diff --git a/deps/v8/tools/profile_view.js b/deps/v8/tools/profile_view.js
index cd0511f6b..bdea6319d 100644
--- a/deps/v8/tools/profile_view.js
+++ b/deps/v8/tools/profile_view.js
@@ -46,15 +46,25 @@ devtools.profiler.ViewBuilder = function(samplingRate) {
* Builds a profile view for the specified call tree.
*
* @param {devtools.profiler.CallTree} callTree A call tree.
+ * @param {boolean} opt_bottomUpViewWeights Whether remapping
+ * of self weights for a bottom up view is needed.
*/
devtools.profiler.ViewBuilder.prototype.buildView = function(
- callTree) {
+ callTree, opt_bottomUpViewWeights) {
var head;
var samplingRate = this.samplingRate;
+ var createViewNode = this.createViewNode;
callTree.traverse(function(node, viewParent) {
- var viewNode = new devtools.profiler.ProfileView.Node(
- node.label, node.totalWeight * samplingRate,
- node.selfWeight * samplingRate, head);
+ var totalWeight = node.totalWeight * samplingRate;
+ var selfWeight = node.selfWeight * samplingRate;
+ if (opt_bottomUpViewWeights === true) {
+ if (viewParent === head) {
+ selfWeight = totalWeight;
+ } else {
+ selfWeight = 0;
+ }
+ }
+ var viewNode = createViewNode(node.label, totalWeight, selfWeight, head);
if (viewParent) {
viewParent.addChild(viewNode);
} else {
@@ -62,41 +72,50 @@ devtools.profiler.ViewBuilder.prototype.buildView = function(
}
return viewNode;
});
- var view = new devtools.profiler.ProfileView(head);
+ var view = this.createView(head);
return view;
};
/**
- * Creates a Profile View object. It allows to perform sorting
- * and filtering actions on the profile. Profile View mimicks
- * the Profile object from WebKit's JSC profiler.
+ * Factory method for a profile view.
*
- * @param {devtools.profiler.ProfileView.Node} head Head (root) node.
- * @constructor
+ * @param {devtools.profiler.ProfileView.Node} head View head node.
+ * @return {devtools.profiler.ProfileView} Profile view.
*/
-devtools.profiler.ProfileView = function(head) {
- this.head = head;
- this.title = '';
- this.uid = '';
- this.heavyProfile = null;
- this.treeProfile = null;
- this.flatProfile = null;
+devtools.profiler.ViewBuilder.prototype.createView = function(head) {
+ return new devtools.profiler.ProfileView(head);
};
/**
- * Updates references between profiles. This is needed for WebKit
- * ProfileView.
+ * Factory method for a profile view node.
+ *
+ * @param {string} internalFuncName A fully qualified function name.
+ * @param {number} totalTime Amount of time that the application spent in the
+ * corresponding function and its descendants (note that depending on
+ * the profile, they can be either callees or callers).
+ * @param {number} selfTime Amount of time that the application spent in the
+ * corresponding function only.
+ * @param {devtools.profiler.ProfileView.Node} head Profile view head.
+ * @return {devtools.profiler.ProfileView.Node} Profile view node.
*/
-devtools.profiler.ProfileView.prototype.updateProfilesRefs = function() {
- var profileNames = ["treeProfile", "heavyProfile", "flatProfile"];
- for (var i = 0; i < profileNames.length; ++i) {
- var destProfile = this[profileNames[i]];
- for (var j = 0; j < profileNames.length; ++j) {
- destProfile[profileNames[j]] = this[profileNames[j]];
- }
- }
+devtools.profiler.ViewBuilder.prototype.createViewNode = function(
+ funcName, totalTime, selfTime, head) {
+ return new devtools.profiler.ProfileView.Node(
+ funcName, totalTime, selfTime, head);
+};
+
+
+/**
+ * Creates a Profile View object. It allows sorting and
+ * filtering actions to be performed on the profile.
+ *
+ * @param {devtools.profiler.ProfileView.Node} head Head (root) node.
+ * @constructor
+ */
+devtools.profiler.ProfileView = function(head) {
+ this.head = head;
};
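
The refactoring replaces the hard-wired constructor calls in buildView with the createView/createViewNode factory methods, so a consumer can substitute its own view and node types without duplicating the traversal. A hypothetical subclass, sketched for illustration (the name and extra field below are made up):

    // Hypothetical builder overriding the new factory method to attach
    // an extra field to every node.
    function AnnotatingViewBuilder(samplingRate) {
      devtools.profiler.ViewBuilder.call(this, samplingRate);
    }
    AnnotatingViewBuilder.prototype = new devtools.profiler.ViewBuilder(1);
    AnnotatingViewBuilder.prototype.createViewNode = function(
        funcName, totalTime, selfTime, head) {
      var node = new devtools.profiler.ProfileView.Node(
          funcName, totalTime, selfTime, head);
      node.visible = true;  // e.g. restore a field the base node dropped
      return node;
    };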
@@ -115,73 +134,6 @@ devtools.profiler.ProfileView.prototype.sort = function(sortFunc) {
/**
- * Sorts the profile view by self time, ascending.
- */
-devtools.profiler.ProfileView.prototype.sortSelfTimeAscending = function() {
- this.sort(function (node1, node2) {
- return node1.selfTime - node2.selfTime; });
-};
-
-
-/**
- * Sorts the profile view by self time, descending.
- */
-devtools.profiler.ProfileView.prototype.sortSelfTimeDescending = function() {
- this.sort(function (node1, node2) {
- return node2.selfTime - node1.selfTime; });
-};
-
-
-/**
- * Sorts the profile view by total time, ascending.
- */
-devtools.profiler.ProfileView.prototype.sortTotalTimeAscending = function() {
- this.sort(function (node1, node2) {
- return node1.totalTime - node2.totalTime; });
-};
-
-
-/**
- * Sorts the profile view by total time, descending.
- */
-devtools.profiler.ProfileView.prototype.sortTotalTimeDescending = function() {
- this.sort(function (node1, node2) {
- return node2.totalTime - node1.totalTime; });
-};
-
-
-/**
- * String comparator compatible with Array.sort requirements.
- *
- * @param {string} s1 First string.
- * @param {string} s2 Second string.
- */
-devtools.profiler.ProfileView.compareStrings = function(s1, s2) {
- return s1 < s2 ? -1 : (s1 > s2 ? 1 : 0);
-};
-
-
-/**
- * Sorts the profile view by function name, ascending.
- */
-devtools.profiler.ProfileView.prototype.sortFunctionNameAscending = function() {
- this.sort(function (node1, node2) {
- return devtools.profiler.ProfileView.compareStrings(
- node1.functionName, node2.functionName); });
-};
-
-
-/**
- * Sorts the profile view by function name, descending.
- */
-devtools.profiler.ProfileView.prototype.sortFunctionNameDescending = function() {
- this.sort(function (node1, node2) {
- return devtools.profiler.ProfileView.compareStrings(
- node2.functionName, node1.functionName); });
-};
-
-
-/**
* Traverses profile view nodes in preorder.
*
* @param {function(devtools.profiler.ProfileView.Node)} f Visitor function.
@@ -212,63 +164,12 @@ devtools.profiler.ProfileView.prototype.traverse = function(f) {
*/
devtools.profiler.ProfileView.Node = function(
internalFuncName, totalTime, selfTime, head) {
- this.callIdentifier = 0;
this.internalFuncName = internalFuncName;
- this.initFuncInfo();
this.totalTime = totalTime;
this.selfTime = selfTime;
this.head = head;
this.parent = null;
this.children = [];
- this.visible = true;
-};
-
-
-/**
- * RegEx for stripping V8's prefixes of compiled functions.
- */
-devtools.profiler.ProfileView.Node.FUNC_NAME_STRIP_RE =
- /^(?:LazyCompile|Function): (.*)$/;
-
-
-/**
- * RegEx for extracting script source URL and line number.
- */
-devtools.profiler.ProfileView.Node.FUNC_NAME_PARSE_RE = /^([^ ]+) (.*):(\d+)$/;
-
-
-/**
- * RegEx for removing protocol name from URL.
- */
-devtools.profiler.ProfileView.Node.URL_PARSE_RE = /^(?:http:\/)?.*\/([^/]+)$/;
-
-
-/**
- * Inits 'functionName', 'url', and 'lineNumber' fields using 'internalFuncName'
- * field.
- */
-devtools.profiler.ProfileView.Node.prototype.initFuncInfo = function() {
- var nodeAlias = devtools.profiler.ProfileView.Node;
- this.functionName = this.internalFuncName;
-
- var strippedName = nodeAlias.FUNC_NAME_STRIP_RE.exec(this.functionName);
- if (strippedName) {
- this.functionName = strippedName[1];
- }
-
- var parsedName = nodeAlias.FUNC_NAME_PARSE_RE.exec(this.functionName);
- if (parsedName) {
- this.url = parsedName[2];
- var parsedUrl = nodeAlias.URL_PARSE_RE.exec(this.url);
- if (parsedUrl) {
- this.url = parsedUrl[1];
- }
- this.functionName = parsedName[1];
- this.lineNumber = parsedName[3];
- } else {
- this.url = '';
- this.lineNumber = 0;
- }
};
diff --git a/deps/v8/tools/run-valgrind.py b/deps/v8/tools/run-valgrind.py
index ccb9309db..8a0869cba 100755
--- a/deps/v8/tools/run-valgrind.py
+++ b/deps/v8/tools/run-valgrind.py
@@ -56,7 +56,7 @@ if code != 0:
# Look through the leak details and make sure that we don't
# have any definitely, indirectly, and possibly lost bytes.
-LEAK_RE = r"(?:definitely|indirectly|possibly) lost: \d+ bytes in \d+ blocks."
+LEAK_RE = r"(?:definitely|indirectly|possibly) lost: "
LEAK_LINE_MATCHER = re.compile(LEAK_RE)
LEAK_OKAY_MATCHER = re.compile(r"lost: 0 bytes in 0 blocks.")
leaks = []
diff --git a/deps/v8/tools/test.py b/deps/v8/tools/test.py
index 9981e8cb4..6bd536b38 100755
--- a/deps/v8/tools/test.py
+++ b/deps/v8/tools/test.py
@@ -164,6 +164,8 @@ class SimpleProgressIndicator(ProgressIndicator):
print "Command: %s" % EscapeCommand(failed.command)
if failed.HasCrashed():
print "--- CRASHED ---"
+ if failed.HasTimedOut():
+ print "--- TIMEOUT ---"
if len(self.failed) == 0:
print "==="
print "=== All tests succeeded"
@@ -207,6 +209,9 @@ class DotsProgressIndicator(SimpleProgressIndicator):
if output.HasCrashed():
sys.stdout.write('C')
sys.stdout.flush()
+ elif output.HasTimedOut():
+ sys.stdout.write('T')
+ sys.stdout.flush()
else:
sys.stdout.write('F')
sys.stdout.flush()
@@ -245,6 +250,8 @@ class CompactProgressIndicator(ProgressIndicator):
print "Command: %s" % EscapeCommand(output.command)
if output.HasCrashed():
print "--- CRASHED ---"
+ if output.HasTimedOut():
+ print "--- TIMEOUT ---"
def Truncate(self, str, length):
if length and (len(str) > (length - 3)):
@@ -381,6 +388,9 @@ class TestOutput(object):
return self.output.exit_code < 0 and \
self.output.exit_code != -signal.SIGABRT
+ def HasTimedOut(self):
+ return self.output.timed_out
+
def HasFailed(self):
execution_failed = self.test.DidFail(self.output)
if self.test.IsNegative():
diff --git a/deps/v8/tools/tickprocessor.js b/deps/v8/tools/tickprocessor.js
index 196daa984..477ab2682 100644
--- a/deps/v8/tools/tickprocessor.js
+++ b/deps/v8/tools/tickprocessor.js
@@ -273,10 +273,6 @@ TickProcessor.prototype.printStatistics = function() {
this.printCounter(this.ticks_.unaccounted, this.ticks_.total);
}
- // Disable initialization of 'funcName', 'url', 'lineNumber' as
- // we don't use it and it just wastes time.
- devtools.profiler.ProfileView.Node.prototype.initFuncInfo = function() {};
-
var flatProfile = this.profile_.getFlatProfile();
var flatView = this.viewBuilder_.buildView(flatProfile);
// Sort by self time, desc, then by name, desc.
diff --git a/deps/v8/tools/v8.xcodeproj/project.pbxproj b/deps/v8/tools/v8.xcodeproj/project.pbxproj
index 83212fb42..2a7cb2db5 100755
--- a/deps/v8/tools/v8.xcodeproj/project.pbxproj
+++ b/deps/v8/tools/v8.xcodeproj/project.pbxproj
@@ -205,6 +205,8 @@
89F23C9F0E78D604006B2466 /* simulator-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF17D0E719B8F00D62E90 /* simulator-arm.cc */; };
89F23CA00E78D609006B2466 /* stub-cache-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF18A0E719B8F00D62E90 /* stub-cache-arm.cc */; };
89FB0E3A0F8E533F00B04B3C /* d8-posix.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89FB0E360F8E531900B04B3C /* d8-posix.cc */; };
+ 9F4B7B890FCC877A00DC4117 /* log-utils.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F4B7B870FCC877A00DC4117 /* log-utils.cc */; };
+ 9F4B7B8A0FCC877A00DC4117 /* log-utils.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F4B7B870FCC877A00DC4117 /* log-utils.cc */; };
9F92FAA90F8F28AD0089F02C /* func-name-inferrer.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F92FAA70F8F28AD0089F02C /* func-name-inferrer.cc */; };
9F92FAAA0F8F28AD0089F02C /* func-name-inferrer.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F92FAA70F8F28AD0089F02C /* func-name-inferrer.cc */; };
9FC86ABD0F5FEDAC00F22668 /* oprofile-agent.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FC86ABB0F5FEDAC00F22668 /* oprofile-agent.cc */; };
@@ -527,6 +529,8 @@
89F23C950E78D5B6006B2466 /* v8_shell-arm */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "v8_shell-arm"; sourceTree = BUILT_PRODUCTS_DIR; };
89FB0E360F8E531900B04B3C /* d8-posix.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "d8-posix.cc"; path = "../src/d8-posix.cc"; sourceTree = "<group>"; };
89FB0E370F8E531900B04B3C /* d8-windows.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "d8-windows.cc"; path = "../src/d8-windows.cc"; sourceTree = "<group>"; };
+ 9F4B7B870FCC877A00DC4117 /* log-utils.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "log-utils.cc"; sourceTree = "<group>"; };
+ 9F4B7B880FCC877A00DC4117 /* log-utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "log-utils.h"; sourceTree = "<group>"; };
9F92FAA70F8F28AD0089F02C /* func-name-inferrer.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "func-name-inferrer.cc"; sourceTree = "<group>"; };
9F92FAA80F8F28AD0089F02C /* func-name-inferrer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "func-name-inferrer.h"; sourceTree = "<group>"; };
9FC86ABB0F5FEDAC00F22668 /* oprofile-agent.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "oprofile-agent.cc"; sourceTree = "<group>"; };
@@ -732,6 +736,8 @@
897FF1510E719B8F00D62E90 /* list.h */,
897FF1520E719B8F00D62E90 /* log.cc */,
897FF1530E719B8F00D62E90 /* log.h */,
+ 9F4B7B870FCC877A00DC4117 /* log-utils.cc */,
+ 9F4B7B880FCC877A00DC4117 /* log-utils.h */,
897FF1540E719B8F00D62E90 /* macro-assembler-arm.cc */,
897FF1550E719B8F00D62E90 /* macro-assembler-arm.h */,
897FF1560E719B8F00D62E90 /* macro-assembler-ia32.cc */,
@@ -1187,6 +1193,7 @@
58950D660F5551C200F3E8BA /* virtual-frame.cc in Sources */,
58950D670F5551C400F3E8BA /* virtual-frame-ia32.cc in Sources */,
89A88E2E0E71A6D60043BA31 /* zone.cc in Sources */,
+ 9F4B7B890FCC877A00DC4117 /* log-utils.cc in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
@@ -1290,6 +1297,7 @@
58950D680F5551CB00F3E8BA /* virtual-frame.cc in Sources */,
58950D690F5551CE00F3E8BA /* virtual-frame-arm.cc in Sources */,
89F23C820E78D5B2006B2466 /* zone.cc in Sources */,
+ 9F4B7B8A0FCC877A00DC4117 /* log-utils.cc in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
diff --git a/deps/v8/tools/visual_studio/v8_base.vcproj b/deps/v8/tools/visual_studio/v8_base.vcproj
index b1802eff7..afd73f4cb 100644
--- a/deps/v8/tools/visual_studio/v8_base.vcproj
+++ b/deps/v8/tools/visual_studio/v8_base.vcproj
@@ -409,6 +409,10 @@
>
</File>
<File
+ RelativePath="..\..\src\frame-element.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\frames.cc"
>
</File>
@@ -501,6 +505,10 @@
>
</File>
<File
+ RelativePath="..\..\src\jump-target-inl.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\jump-target.cc"
>
</File>
@@ -537,6 +545,14 @@
>
</File>
<File
+ RelativePath="..\..\src\log-utils.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\log-utils.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\ia32\macro-assembler-ia32.cc"
>
</File>
diff --git a/deps/v8/tools/visual_studio/v8_base_arm.vcproj b/deps/v8/tools/visual_studio/v8_base_arm.vcproj
index 35a08d5ae..ca0a2daeb 100644
--- a/deps/v8/tools/visual_studio/v8_base_arm.vcproj
+++ b/deps/v8/tools/visual_studio/v8_base_arm.vcproj
@@ -541,6 +541,14 @@
>
</File>
<File
+ RelativePath="..\..\src\log-utils.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\log-utils.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\arm\macro-assembler-arm.cc"
>
</File>
diff --git a/deps/v8/tools/visual_studio/v8_cctest.vcproj b/deps/v8/tools/visual_studio/v8_cctest.vcproj
index 6aa090ad0..97de44674 100644
--- a/deps/v8/tools/visual_studio/v8_cctest.vcproj
+++ b/deps/v8/tools/visual_studio/v8_cctest.vcproj
@@ -206,6 +206,10 @@
>
</File>
<File
+ RelativePath="..\..\test\cctest\test-log-utils.cc"
+ >
+ </File>
+ <File
RelativePath="..\..\test\cctest\test-log-ia32.cc"
>
</File>
diff --git a/deps/v8/tools/visual_studio/v8_cctest_arm.vcproj b/deps/v8/tools/visual_studio/v8_cctest_arm.vcproj
index 566d75eef..a027a8414 100644
--- a/deps/v8/tools/visual_studio/v8_cctest_arm.vcproj
+++ b/deps/v8/tools/visual_studio/v8_cctest_arm.vcproj
@@ -200,6 +200,10 @@
>
</File>
<File
+ RelativePath="..\..\test\cctest\test-log-utils.cc"
+ >
+ </File>
+ <File
RelativePath="..\..\test\cctest\test-mark-compact.cc"
>
</File>